%%% -*-BibTeX-*-
%%% ====================================================================
%%%  BibTeX-file{
%%%     author          = "Nelson H. F. Beebe",
%%%     version         = "1.46",
%%%     date            = "29 August 2024",
%%%     time            = "08:08:28 MDT",
%%%     filename        = "tist.bib",
%%%     address         = "University of Utah
%%%                        Department of Mathematics, 110 LCB
%%%                        155 S 1400 E RM 233
%%%                        Salt Lake City, UT 84112-0090
%%%                        USA",
%%%     telephone       = "+1 801 581 5254",
%%%     FAX             = "+1 801 581 4148",
%%%     URL             = "https://www.math.utah.edu/~beebe",
%%%     checksum        = "10529 44888 236313 2236130",
%%%     email           = "beebe at math.utah.edu, beebe at acm.org,
%%%                        beebe at computer.org (Internet)",
%%%     codetable       = "ISO/ASCII",
%%%     keywords        = "bibliography; BibTeX; ACM Transactions on
%%%                        Intelligent Systems and Technology (TIST)",
%%%     license         = "public domain",
%%%     supported       = "yes",
%%%     docstring       = "This is a COMPLETE BibTeX bibliography for
%%%                        the journal ACM Transactions on Intelligent
%%%                        Systems and Technology (TIST) (CODEN ????,
%%%                        ISSN 2157-6904 (print), 2157-6912
%%%                        (electronic)),  covering all journal issues from
%%%                        2010 -- date.
%%%
%%%                        At version 1.46, the COMPLETE journal
%%%                        coverage looked like this:
%%%
%%%                             2010 (  15)    2015 (  86)    2020 (  73)
%%%                             2011 (  60)    2016 (  68)    2021 (  82)
%%%                             2012 (  59)    2017 (  82)    2022 ( 105)
%%%                             2013 (  95)    2018 (  59)    2023 ( 114)
%%%                             2014 (  30)    2019 (  65)    2024 (  87)
%%%
%%%                             Article:       1080
%%%
%%%                             Total entries: 1080
%%%
%%%                        The journal Web page can be found at:
%%%
%%%                            http://www.acm.org/pubs/tist
%%%                            http://portal.acm.org/citation.cfm?id=J1318
%%%
%%%                        The journal table of contents page is at:
%%%
%%%                            http://www.acm.org/pubs/contents/journals/tist/
%%%
%%%                        The initial draft was extracted from the
%%%                        journal Web site.
%%%
%%%                        ACM copyrights explicitly permit abstracting
%%%                        with credit, so article abstracts, keywords,
%%%                        and subject classifications have been
%%%                        included in this bibliography wherever
%%%                        available.  Article reviews have been
%%%                        omitted, until their copyright status has
%%%                        been clarified.
%%%
%%%                        URL keys in the bibliography point to
%%%                        World Wide Web locations of additional
%%%                        information about the entry.
%%%
%%%                        Numerous errors in the sources noted above
%%%                        have been corrected.   Spelling has been
%%%                        verified with the UNIX spell and GNU ispell
%%%                        programs using the exception dictionary
%%%                        stored in the companion file with extension
%%%                        .sok.
%%%
%%%                        BibTeX citation tags are uniformly chosen
%%%                        as name:year:abbrev, where name is the
%%%                        family name of the first author or editor,
%%%                        year is a 4-digit number, and abbrev is a
%%%                        3-letter condensation of important title
%%%                        words. Citation tags were automatically
%%%                        generated by software developed for the
%%%                        BibNet Project.
%%%
%%%                        In this bibliography, entries are sorted in
%%%                        publication order, using ``bibsort -byvolume.''
%%%
%%%                        The checksum field above contains a CRC-16
%%%                        checksum as the first value, followed by the
%%%                        equivalent of the standard UNIX wc (word
%%%                        count) utility output of lines, words, and
%%%                        characters.  This is produced by Robert
%%%                        Solovay's checksum utility.",
%%%  }
%%% ====================================================================
@Preamble{"\input bibnames.sty"}

%%% ====================================================================
%%% Acknowledgement abbreviations:
@String{ack-nhfb = "Nelson H. F. Beebe,
                    University of Utah,
                    Department of Mathematics, 110 LCB,
                    155 S 1400 E RM 233,
                    Salt Lake City, UT 84112-0090, USA,
                    Tel: +1 801 581 5254,
                    FAX: +1 801 581 4148,
                    e-mail: \path|beebe@math.utah.edu|,
                            \path|beebe@acm.org|,
                            \path|beebe@computer.org| (Internet),
                    URL: \path|https://www.math.utah.edu/~beebe/|"}

%%% ====================================================================
%%% Journal abbreviations:
@String{j-TIST                 = "ACM Transactions on Intelligent Systems and
                                  Technology (TIST)"}

%%% ====================================================================
%%% Bibliography entries:
@Article{Yang:2010:IAT,
  author =       "Qiang Yang",
  title =        "Introduction to {ACM TIST}",
  journal =      j-TIST,
  volume =       "1",
  number =       "1",
  pages =        "1:1--1:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1858948.1858949",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Tue Nov 23 12:18:28 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "1",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Liu:2010:IAT,
  author =       "Huan Liu and Dana Nau",
  title =        "Introduction to the {ACM TIST} special issue {AI} in
                 social computing and cultural modeling",
  journal =      j-TIST,
  volume =       "1",
  number =       "1",
  pages =        "2:1--2:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1858948.1858950",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Tue Nov 23 12:18:28 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "2",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Bainbridge:2010:VWC,
  author =       "William Sims Bainbridge",
  title =        "Virtual worlds as cultural models",
  journal =      j-TIST,
  volume =       "1",
  number =       "1",
  pages =        "3:1--3:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1858948.1858951",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Tue Nov 23 12:18:28 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "3",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Feldman:2010:SCR,
  author =       "Michal Feldman and Moshe Tennenholtz",
  title =        "Structured coalitions in resource selection games",
  journal =      j-TIST,
  volume =       "1",
  number =       "1",
  pages =        "4:1--4:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1858948.1858952",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Tue Nov 23 12:18:28 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "4",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Wu:2010:OFU,
  author =       "Fang Wu and Bernardo A. Huberman",
  title =        "Opinion formation under costly expression",
  journal =      j-TIST,
  volume =       "1",
  number =       "1",
  pages =        "5:1--5:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1858948.1858953",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Tue Nov 23 12:18:28 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "5",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Roos:2010:ESD,
  author =       "Patrick Roos and J. Ryan Carr and Dana S. Nau",
  title =        "Evolution of state-dependent risk preferences",
  journal =      j-TIST,
  volume =       "1",
  number =       "1",
  pages =        "6:1--6:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1858948.1858954",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Tue Nov 23 12:18:28 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "6",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Goolsby:2010:SMC,
  author =       "Rebecca Goolsby",
  title =        "Social media as crisis platform: The future of
                 community maps\slash crisis maps",
  journal =      j-TIST,
  volume =       "1",
  number =       "1",
  pages =        "7:1--7:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1858948.1858955",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Tue Nov 23 12:18:28 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "7",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Wang:2010:AIS,
  author =       "Meng Wang and Bo Liu and Xian-Sheng Hua",
  title =        "Accessible image search for colorblindness",
  journal =      j-TIST,
  volume =       "1",
  number =       "1",
  pages =        "8:1--8:??",
  month =        oct,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1858948.1858956",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Tue Nov 23 12:18:28 MST 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "8",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Chen:2010:PSI,
  author =       "Yixin Chen",
  title =        "Preface to special issue on applications of automated
                 planning",
  journal =      j-TIST,
  volume =       "1",
  number =       "2",
  pages =        "9:1--9:??",
  month =        nov,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1869397.1869398",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Wed Jan 26 14:40:50 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "9",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Porteous:2010:API,
  author =       "Julie Porteous and Marc Cavazza and Fred Charles",
  title =        "Applying planning to interactive storytelling:
                 Narrative control using state constraints",
  journal =      j-TIST,
  volume =       "1",
  number =       "2",
  pages =        "10:1--10:??",
  month =        nov,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1869397.1869399",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Wed Jan 26 14:40:50 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "10",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Bryce:2010:PIB,
  author =       "Daniel Bryce and Michael Verdicchio and Seungchan
                 Kim",
  title =        "Planning interventions in biological networks",
  journal =      j-TIST,
  volume =       "1",
  number =       "2",
  pages =        "11:1--11:??",
  month =        nov,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1869397.1869400",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Wed Jan 26 14:40:50 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "11",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Refanidis:2010:CBA,
  author =       "Ioannis Refanidis and Neil Yorke-Smith",
  title =        "A constraint-based approach to scheduling an
                 individual's activities",
  journal =      j-TIST,
  volume =       "1",
  number =       "2",
  pages =        "12:1--12:??",
  month =        nov,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1869397.1869401",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Wed Jan 26 14:40:50 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "12",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Benaskeur:2010:CRT,
  author =       "Abder Rezak Benaskeur and Froduald Kabanza and Eric
                 Beaudry",
  title =        "{CORALS}: a real-time planner for anti-air defense
                 operations",
  journal =      j-TIST,
  volume =       "1",
  number =       "2",
  pages =        "13:1--13:??",
  month =        nov,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1869397.1869402",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Wed Jan 26 14:40:50 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "13",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Talamadupula:2010:PHR,
  author =       "Kartik Talamadupula and J. Benton and Subbarao
                 Kambhampati and Paul Schermerhorn and Matthias
                 Scheutz",
  title =        "Planning for human-robot teaming in open worlds",
  journal =      j-TIST,
  volume =       "1",
  number =       "2",
  pages =        "14:1--14:??",
  month =        nov,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1869397.1869403",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Wed Jan 26 14:40:50 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "14",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Cirillo:2010:HAT,
  author =       "Marcello Cirillo and Lars Karlsson and Alessandro
                 Saffiotti",
  title =        "Human-aware task planning: An application to mobile
                 robots",
  journal =      j-TIST,
  volume =       "1",
  number =       "2",
  pages =        "15:1--15:??",
  month =        nov,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1869397.1869404",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Wed Jan 26 14:40:50 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "15",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2011:ISI,
  author =       "Daqing Zhang and Matthai Philipose and Qiang Yang",
  title =        "Introduction to the special issue on intelligent
                 systems for activity recognition",
  journal =      j-TIST,
  volume =       "2",
  number =       "1",
  pages =        "1:1--1:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1889681.1889682",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Wed Jan 26 14:40:51 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "1",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zheng:2011:LTR,
  author =       "Yu Zheng and Xing Xie",
  title =        "Learning travel recommendations from user-generated
                 {GPS} traces",
  journal =      j-TIST,
  volume =       "2",
  number =       "1",
  pages =        "2:1--2:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1889681.1889683",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Wed Jan 26 14:40:51 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "2",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Farrahi:2011:DRL,
  author =       "Katayoun Farrahi and Daniel Gatica-Perez",
  title =        "Discovering routines from large-scale human locations
                 using probabilistic topic models",
  journal =      j-TIST,
  volume =       "2",
  number =       "1",
  pages =        "3:1--3:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1889681.1889684",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Wed Jan 26 14:40:51 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "3",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Hsu:2011:PMC,
  author =       "Jane Yung-Jen Hsu and Chia-Chun Lian and Wan-Rong
                 Jih",
  title =        "Probabilistic models for concurrent chatting activity
                 recognition",
  journal =      j-TIST,
  volume =       "2",
  number =       "1",
  pages =        "4:1--4:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1889681.1889685",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Wed Jan 26 14:40:51 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "4",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhou:2011:RPA,
  author =       "Yue Zhou and Bingbing Ni and Shuicheng Yan and Thomas
                 S. Huang",
  title =        "Recognizing pair-activities by causality analysis",
  journal =      j-TIST,
  volume =       "2",
  number =       "1",
  pages =        "5:1--5:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1889681.1889686",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Wed Jan 26 14:40:51 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "5",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Ward:2011:PMA,
  author =       "Jamie A. Ward and Paul Lukowicz and Hans W.
                 Gellersen",
  title =        "Performance metrics for activity recognition",
  journal =      j-TIST,
  volume =       "2",
  number =       "1",
  pages =        "6:1--6:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1889681.1889687",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Wed Jan 26 14:40:51 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "6",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Wyatt:2011:ICC,
  author =       "Danny Wyatt and Tanzeem Choudhury and Jeff Bilmes and
                 James A. Kitts",
  title =        "Inferring colocation and conversation networks from
                 privacy-sensitive audio with implications for
                 computational social science",
  journal =      j-TIST,
  volume =       "2",
  number =       "1",
  pages =        "7:1--7:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1889681.1889688",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Wed Jan 26 14:40:51 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "7",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Bao:2011:FRC,
  author =       "Xinlong Bao and Thomas G. Dietterich",
  title =        "{FolderPredictor}: Reducing the cost of reaching the
                 right folder",
  journal =      j-TIST,
  volume =       "2",
  number =       "1",
  pages =        "8:1--8:??",
  month =        jan,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1889681.1889689",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  bibdate =      "Wed Jan 26 14:40:51 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "8",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Hua:2011:ISI,
  author =       "Xian-Sheng Hua and Qi Tian and Alberto {Del Bimbo} and
                 Ramesh Jain",
  title =        "Introduction to the special issue on intelligent
                 multimedia systems and technology",
  journal =      j-TIST,
  volume =       "2",
  number =       "2",
  pages =        "9:1--9:??",
  month =        feb,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1899412.1899413",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Oct 1 16:23:55 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "9",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Wang:2011:ALM,
  author =       "Meng Wang and Xian-Sheng Hua",
  title =        "Active learning in multimedia annotation and
                 retrieval: a survey",
  journal =      j-TIST,
  volume =       "2",
  number =       "2",
  pages =        "10:1--10:??",
  month =        feb,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1899412.1899414",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Oct 1 16:23:55 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Active learning is a machine learning technique that
                 selects the most informative samples for labeling and
                 uses them as training data. It has been widely explored
                 in multimedia research community for its capability of
                 reducing human annotation effort. In this article, we
                 provide a survey on the efforts of leveraging active
                 learning in multimedia annotation and retrieval. We
                 mainly focus on two application domains: image/video
                 annotation and content-based image retrieval. We first
                 briefly introduce the principle of active learning and
                 then we analyze the sample selection criteria. We
                 categorize the existing sample selection strategies
                 used in multimedia annotation and retrieval into five
                 criteria: risk reduction, uncertainty, diversity,
                 density and relevance. We then introduce several
                 classification models used in active learning-based
                 multimedia annotation and retrieval, including
                 semi-supervised learning, multilabel learning and
                 multiple instance learning. We also provide a
                 discussion on several future trends in this research
                 direction. In particular, we discuss cost analysis of
                 human annotation and large-scale interactive multimedia
                 annotation.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "10",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Shao:2011:VIG,
  author =       "Yuanlong Shao and Yuan Zhou and Deng Cai",
  title =        "Variational inference with graph regularization for
                 image annotation",
  journal =      j-TIST,
  volume =       "2",
  number =       "2",
  pages =        "11:1--11:??",
  month =        feb,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1899412.1899415",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Oct 1 16:23:55 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Image annotation is a typical area where there are
                 multiple types of attributes associated with each
                 individual image. In order to achieve better
                 performance, it is important to develop effective
                 modeling by utilizing prior knowledge. In this article,
                 we extend the graph regularization approaches to a more
                 general case where the regularization is imposed on the
                 factorized variational distributions, instead of
                 posterior distributions implicitly involved in EM-like
                 algorithms. In this way, the problem modeling can be
                 more flexible, and we can choose any factor in the
                 problem domain to impose graph regularization wherever
                 there are similarity constraints among the instances.
                 We formulate the problem formally and show its
                 geometrical background in manifold learning. We also
                 design two practically effective algorithms and analyze
                 their properties such as the convergence. Finally, we
                 apply our approach to image annotation and show the
                 performance improvement of our algorithm.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "11",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Yu:2011:CBS,
  author =       {Jie Yu and Xin Jin and Jiawei Han and Jiebo Luo},
  title =        {Collection-based sparse label propagation and its
                 application on social group suggestion from photos},
  journal =      j-TIST,
  volume =       {2},
  number =       {2},
  pages =        {12:1--12:??},
  month =        feb,
  year =         {2011},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/1899412.1899416},
  ISSN =         {2157-6904 (print), 2157-6912 (electronic)},
  ISSN-L =       {2157-6904},
  bibdate =      {Thu Oct 1 16:23:55 MDT 2015},
  bibsource =    {http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib},
  abstract =     {Online social network services pose great
                 opportunities and challenges for many research areas.
                 In multimedia content analysis, automatic social group
                 recommendation for images holds the promise to expand
                 one's social network through media sharing. However,
                 most existing techniques cannot generate satisfactory
                 social group suggestions when the images are classified
                 independently. In this article, we present novel
                 methods to produce accurate suggestions of suitable
                 social groups from a user's personal photo collection.
                 First, an automatic clustering process is designed to
                 estimate the group similarities, select the optimal
                 number of clusters and categorize the social groups.
                 Both visual content and textual annotations are
                 integrated to generate initial predictions of the group
                 categories for the images. Next, the relationship among
                 images in a user's collection is modeled as a sparse
                 graph. A collection-based sparse label propagation
                 method is proposed to improve the group suggestions.
                 Furthermore, the sparse graph-based collection model
                 can be readily exploited to select the most influential
                 and informative samples for active relevance feedback,
                 which can be integrated with the label propagation
                 process without the need for classifier retraining. The
                 proposed methods have been tested on group suggestion
                 tasks for real user collections and demonstrated
                 superior performance over the state-of-the-art
                 techniques.},
  acknowledgement = ack-nhfb,
  ajournal =     {ACM Trans. Intell. Syst. Technol.},
  articleno =    {12},
  fjournal =     {ACM Transactions on Intelligent Systems and Technology
                 (TIST)},
  journal-URL =  {http://portal.acm.org/citation.cfm?id=J1318},
}

@Article{Wu:2011:DML,
  author =       {Lei Wu and Steven C. H. Hoi and Rong Jin and Jianke
                 Zhu and Nenghai Yu},
  title =        {Distance metric learning from uncertain side
                 information for automated photo tagging},
  journal =      j-TIST,
  volume =       {2},
  number =       {2},
  pages =        {13:1--13:??},
  month =        feb,
  year =         {2011},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/1899412.1899417},
  ISSN =         {2157-6904 (print), 2157-6912 (electronic)},
  ISSN-L =       {2157-6904},
  bibdate =      {Thu Oct 1 16:23:55 MDT 2015},
  bibsource =    {http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib},
  abstract =     {Automated photo tagging is an important technique for
                 many intelligent multimedia information systems, for
                 example, smart photo management system and intelligent
                 digital media library. To attack the challenge, several
                 machine learning techniques have been developed and
                 applied for automated photo tagging. For example,
                 supervised learning techniques have been applied to
                 automated photo tagging by training statistical
                 classifiers from a collection of manually labeled
                 examples. Although the existing approaches work well
                 for small testbeds with relatively small number of
                 annotation words, due to the long-standing challenge of
                 object recognition, they often perform poorly in
                 large-scale problems. Another limitation of the
                 existing approaches is that they require a set of
                 high-quality labeled data, which is not only expensive
                 to collect but also time consuming. In this article, we
                 investigate a social image based annotation scheme by
                 exploiting implicit side information that is available
                 for a large number of social photos from the social web
                 sites. The key challenge of our intelligent annotation
                 scheme is how to learn an effective distance metric
                 based on implicit side information (visual or textual)
                 of social photos. To this end, we present a novel
                 ``Probabilistic Distance Metric Learning'' (PDML)
                 framework, which can learn optimized metrics by
                 effectively exploiting the implicit side information
                 vastly available on the social web. We apply the
                 proposed technique to photo annotation tasks based on a
                 large social image testbed with over 1 million tagged
                 photos crawled from a social photo sharing portal.
                 Encouraging results show that the proposed technique is
                 effective and promising for social photo based
                 annotation tasks.},
  acknowledgement = ack-nhfb,
  ajournal =     {ACM Trans. Intell. Syst. Technol.},
  articleno =    {13},
  fjournal =     {ACM Transactions on Intelligent Systems and Technology
                 (TIST)},
  journal-URL =  {http://portal.acm.org/citation.cfm?id=J1318},
}

@Article{Tang:2011:IAK,
  author =       "Jinhui Tang and Richang Hong and Shuicheng Yan and
                 Tat-Seng Chua and Guo-Jun Qi and Ramesh Jain",
  title =        "Image annotation by {$k$NN}-sparse graph-based label
                 propagation over noisily tagged web images",
  journal =      j-TIST,
  volume =       "2",
  number =       "2",
  pages =        "14:1--14:??",
  month =        feb,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1899412.1899418",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Oct 1 16:23:55 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In this article, we exploit the problem of annotating
                 a large-scale image corpus by label propagation over
                 noisily tagged web images. To annotate the images more
                 accurately, we propose a novel $k$NN-sparse
                 graph-based semi-supervised learning approach for
                 harnessing the labeled and unlabeled data
                 simultaneously. The sparse graph constructed by
                 datum-wise one-vs-$k$NN sparse reconstructions of all
                 samples can remove most of the semantically unrelated
                 links among the data, and thus it is more robust and
                 discriminative than the conventional graphs. Meanwhile,
                 we apply the approximate $k$ nearest neighbors to
                 accelerate the sparse graph construction without losing
                 its effectiveness. More importantly, we propose an
                 effective training label refinement strategy within
                 this graph-based learning framework to handle the noise
                 in the training labels, by bringing in a dual
                 regularization for both the quantity and sparsity of
                 the noise. We conduct extensive experiments on a
                 real-world image database consisting of 55,615 Flickr
                 images and noisily tagged training labels. The results
                 demonstrate both the effectiveness and efficiency of
                 the proposed approach and its capability to deal with
                 the noise in the training labels.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "14",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Tong:2011:APL,
  author =       {Xiaofeng Tong and Jia Liu and Tao Wang and Yimin
                 Zhang},
  title =        {Automatic player labeling, tracking and field
                 registration and trajectory mapping in broadcast soccer
                 video},
  journal =      j-TIST,
  volume =       {2},
  number =       {2},
  pages =        {15:1--15:??},
  month =        feb,
  year =         {2011},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/1899412.1899419},
  ISSN =         {2157-6904 (print), 2157-6912 (electronic)},
  ISSN-L =       {2157-6904},
  bibdate =      {Thu Oct 1 16:23:55 MDT 2015},
  bibsource =    {http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib},
  abstract =     {In this article, we present a method to perform
                 automatic player trajectories mapping based on player
                 detection, unsupervised labeling, efficient
                 multi-object tracking, and playfield registration in
                 broadcast soccer videos. Player detector determines the
                 players' positions and scales by combining the ability
                 of dominant color based background subtraction and a
                 boosting detector with Haar features. We first learn
                 the dominant color with accumulate color histogram at
                 the beginning of processing, then use the player
                 detector to collect hundreds of player samples, and
                 learn player appearance codebook by unsupervised
                 clustering. In a soccer game, a player can be labeled
                 as one of four categories: two teams, referee or
                 outlier. The learning capability enables the method to
                 be generalized well to different videos without any
                 manual initialization. With the dominant color and
                 player appearance model, we can locate and label each
                 player. After that, we perform multi-object tracking by
                 using Markov Chain Monte Carlo (MCMC) data association
                 to generate player trajectories. Some data driven
                 dynamics are proposed to improve the Markov chain's
                 efficiency, such as label consistency, motion
                 consistency, and track length, etc. Finally, we extract
                 key-points and find the mapping from an image plane to
                 the standard field model, and then map players'
                 position and trajectories to the field. A large
                 quantity of experimental results on FIFA World Cup 2006
                 videos demonstrate that this method can reach high
                 detection and labeling precision, reliably tracking in
                 scenes of player occlusion, moderate camera motion and
                 pose variation, and yield promising field registration
                 results.},
  acknowledgement = ack-nhfb,
  ajournal =     {ACM Trans. Intell. Syst. Technol.},
  articleno =    {15},
  fjournal =     {ACM Transactions on Intelligent Systems and Technology
                 (TIST)},
  journal-URL =  {http://portal.acm.org/citation.cfm?id=J1318},
}

@Article{Liu:2011:NJD,
  author =       "Qingzhong Liu and Andrew H. Sung and Mengyu Qiao",
  title =        "Neighboring joint density-based {JPEG} steganalysis",
  journal =      j-TIST,
  volume =       "2",
  number =       "2",
  pages =        "16:1--16:??",
  month =        feb,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1899412.1899420",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Oct 1 16:23:55 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The threat posed by hackers, spies, terrorists, and
                 criminals, etc. using steganography for stealthy
                 communications and other illegal purposes is a serious
                 concern of cyber security. Several steganographic
                 systems that have been developed and made readily
                 available utilize JPEG images as carriers. Due to the
                 popularity of JPEG images on the Internet, effective
                 steganalysis techniques are called for to counter the
                 threat of JPEG steganography. In this article, we
                 propose a new approach based on feature mining on the
                 discrete cosine transform (DCT) domain and machine
                 learning for steganalysis of JPEG images. First,
                 neighboring joint density features on both intra-block
                 and inter-block are extracted from the DCT coefficient
                 array and the absolute array, respectively; then a
                 support vector machine (SVM) is applied to the features
                 for detection. An evolving neural-fuzzy inference
                 system is employed to predict the hiding amount in JPEG
                 steganograms. We also adopt a feature selection method
                 of support vector machine recursive feature elimination
                 to reduce the number of features. Experimental results
                 show that, in detecting several JPEG-based
                 steganographic systems, our method prominently
                 outperforms the well-known Markov-process based
                 approach.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "16",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Bhatt:2011:PTM,
  author =       {Chidansh Bhatt and Mohan Kankanhalli},
  title =        {Probabilistic temporal multimedia data mining},
  journal =      j-TIST,
  volume =       {2},
  number =       {2},
  pages =        {17:1--17:??},
  month =        feb,
  year =         {2011},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/1899412.1899421},
  ISSN =         {2157-6904 (print), 2157-6912 (electronic)},
  ISSN-L =       {2157-6904},
  bibdate =      {Thu Oct 1 16:23:55 MDT 2015},
  bibsource =    {http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib},
  abstract =     {Existing sequence pattern mining techniques assume
                 that the obtained events from event detectors are
                 accurate. However, in reality, event detectors label
                 the events from different modalities with a certain
                 probability over a time-interval. In this article, we
                 consider for the first time Probabilistic Temporal
                 Multimedia (PTM) Event data to discover accurate
                 sequence patterns. PTM event data considers the start
                 time, end time, event label and associated probability
                 for the sequence pattern discovery. As the existing
                 sequence pattern mining techniques cannot work on such
                 realistic data, we have developed a novel framework for
                 performing sequence pattern mining on probabilistic
                 temporal multimedia event data. We perform probability
                 fusion to resolve the redundancy among detected events
                 from different modalities, considering their
                 cross-modal correlation. We propose a novel sequence
                 pattern mining algorithm called Probabilistic Interval
                 based Event Miner (PIE-Miner) for discovering frequent
                 sequence patterns from interval based events. PIE-Miner
                 has a new support counting mechanism developed for PTM
                 data. Existing sequence pattern mining algorithms have
                 event label level support counting mechanism, whereas
                 we have developed event cluster level support counting
                 mechanism. We discover the complete set of all possible
                 temporal relationships based on Allen's interval
                 algebra. The experimental results showed that the
                 discovered sequence patterns are more useful than the
                 patterns discovered with state-of-the-art sequence
                 pattern mining algorithms.},
  acknowledgement = ack-nhfb,
  ajournal =     {ACM Trans. Intell. Syst. Technol.},
  articleno =    {17},
  fjournal =     {ACM Transactions on Intelligent Systems and Technology
                 (TIST)},
  journal-URL =  {http://portal.acm.org/citation.cfm?id=J1318},
}

@Article{Ling:2011:ISI,
  author =       "Charles X. Ling",
  title =        "Introduction to special issue on machine learning for
                 business applications",
  journal =      j-TIST,
  volume =       "2",
  number =       "3",
  pages =        "18:1--18:??",
  month =        apr,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1961189.1961190",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri May 13 11:20:03 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "18",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Dhar:2011:PFM,
  author =       "Vasant Dhar",
  title =        "Prediction in financial markets: The case for small
                 disjuncts",
  journal =      j-TIST,
  volume =       "2",
  number =       "3",
  pages =        "19:1--19:??",
  month =        apr,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1961189.1961191",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri May 13 11:20:03 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "19",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Huang:2011:LBC,
  author =       "Szu-Hao Huang and Shang-Hong Lai and Shih-Hsien Tai",
  title =        "A learning-based contrarian trading strategy via a
                 dual-classifier model",
  journal =      j-TIST,
  volume =       "2",
  number =       "3",
  pages =        "20:1--20:??",
  month =        apr,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1961189.1961192",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri May 13 11:20:03 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "20",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Li:2011:CCD,
  author =       "Bin Li and Steven C. H. Hoi and Vivekanand
                 Gopalkrishnan",
  title =        "{CORN}: Correlation-driven nonparametric learning
                 approach for portfolio selection",
  journal =      j-TIST,
  volume =       "2",
  number =       "3",
  pages =        "21:1--21:??",
  month =        apr,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1961189.1961193",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri May 13 11:20:03 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "21",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Bonchi:2011:SNA,
  author =       "Francesco Bonchi and Carlos Castillo and Aristides
                 Gionis and Alejandro Jaimes",
  title =        "Social network analysis and mining for business
                 applications",
  journal =      j-TIST,
  volume =       "2",
  number =       "3",
  pages =        "22:1--22:??",
  month =        apr,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1961189.1961194",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri May 13 11:20:03 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "22",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2011:HMF,
  author =       "Richong Zhang and Thomas Tran",
  title =        "A helpfulness modeling framework for electronic
                 word-of-mouth on consumer opinion platforms",
  journal =      j-TIST,
  volume =       "2",
  number =       "3",
  pages =        "23:1--23:??",
  month =        apr,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1961189.1961195",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri May 13 11:20:03 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "23",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Ge:2011:MLC,
  author =       "Yong Ge and Hui Xiong and Wenjun Zhou and Siming Li
                 and Ramendra Sahoo",
  title =        "Multifocal learning for customer problem analysis",
  journal =      j-TIST,
  volume =       "2",
  number =       "3",
  pages =        "24:1--24:??",
  month =        apr,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1961189.1961196",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri May 13 11:20:03 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "24",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Hsu:2011:ISI,
  author =       "Chun-Nan Hsu",
  title =        "Introduction to special issue on large-scale machine
                 learning",
  journal =      j-TIST,
  volume =       "2",
  number =       "3",
  pages =        "25:1--25:??",
  month =        apr,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1961189.1961197",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri May 13 11:20:03 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "25",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Liu:2011:PPL,
  author =       "Zhiyuan Liu and Yuzhou Zhang and Edward Y. Chang and
                 Maosong Sun",
  title =        "{PLDA+}: Parallel latent {Dirichlet} allocation with
                 data placement and pipeline processing",
  journal =      j-TIST,
  volume =       "2",
  number =       "3",
  pages =        "26:1--26:??",
  month =        apr,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1961189.1961198",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri May 13 11:20:03 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "26",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Chang:2011:LLS,
  author =       "Chih-Chung Chang and Chih-Jen Lin",
  title =        "{LIBSVM}: a library for support vector machines",
  journal =      j-TIST,
  volume =       "2",
  number =       "3",
  pages =        "27:1--27:??",
  month =        apr,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1961189.1961199",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri May 13 11:20:03 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "LIBSVM is a library for Support Vector Machines
                 (SVMs). We have been actively developing this package
                 since the year 2000. The goal is to help users to
                 easily apply SVM to their applications. LIBSVM has
                 gained wide popularity in machine learning and many
                 other areas. In this article, we present all
                 implementation details of LIBSVM. Issues such as
                 solving SVM optimization problems, theoretical
                 convergence, multiclass classification, probability
                 estimates, and parameter selection are discussed in
                 detail.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "27",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Gasso:2011:BOL,
  author =       "Gilles Gasso and Aristidis Pappaioannou and Marina
                 Spivak and L{\'e}on Bottou",
  title =        "Batch and online learning algorithms for nonconvex
                 {Neyman--Pearson} classification",
  journal =      j-TIST,
  volume =       "2",
  number =       "3",
  pages =        "28:1--28:??",
  month =        apr,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1961189.1961200",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri May 13 11:20:03 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "28",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Ma:2011:LRE,
  author =       "Hao Ma and Irwin King and Michael R. Lyu",
  title =        "Learning to recommend with explicit and implicit
                 social relations",
  journal =      j-TIST,
  volume =       "2",
  number =       "3",
  pages =        "29:1--29:??",
  month =        apr,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1961189.1961201",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri May 13 11:20:03 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "29",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Ma:2011:LDM,
  author =       "Justin Ma and Lawrence K. Saul and Stefan Savage and
                 Geoffrey M. Voelker",
  title =        "Learning to detect malicious {URLs}",
  journal =      j-TIST,
  volume =       "2",
  number =       "3",
  pages =        "30:1--30:??",
  month =        apr,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1961189.1961202",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri May 13 11:20:03 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Malicious Web sites are a cornerstone of Internet
                 criminal activities. The dangers of these sites have
                 created a demand for safeguards that protect end-users
                 from visiting them. This article explores how to detect
                 malicious Web sites from the lexical and host-based
                 features of their URLs. We show that this problem lends
                 itself naturally to modern algorithms for online
                 learning. Online algorithms not only process large
                 numbers of URLs more efficiently than batch algorithms,
                 they also adapt more quickly to new features in the
                 continuously evolving distribution of malicious URLs.
                 We develop a real-time system for gathering URL
                 features and pair it with a real-time feed of labeled
                 URLs from a large Web mail provider. From these
                 features and labels, we are able to train an online
                 classifier that detects malicious Web sites with 99\%
                 accuracy over a balanced dataset.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "30",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 2, no. 4 (July 2011); article 31. ISSN-L added for
%%% consistency with the v. 3 entries below.
@Article{Gomes:2011:ISI,
  author =       "Carla Gomes and Qiang Yang",
  title =        "Introduction to special issue on computational
                 sustainability",
  journal =      j-TIST,
  volume =       "2",
  number =       "4",
  pages =        "31:1--31:??",
  month =        jul,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1989734.1989735",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Jul 22 08:50:59 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "31",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 2, no. 4 (July 2011); article 32. ISSN-L added for
%%% consistency with the v. 3 entries below.
@Article{Krause:2011:SAO,
  author =       "Andreas Krause and Carlos Guestrin",
  title =        "Submodularity and its applications in optimized
                 information gathering",
  journal =      j-TIST,
  volume =       "2",
  number =       "4",
  pages =        "32:1--32:??",
  month =        jul,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1989734.1989736",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Jul 22 08:50:59 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "32",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 2, no. 4 (July 2011); article 33. ISSN-L added for
%%% consistency with the v. 3 entries below.
@Article{Cattafi:2011:SBP,
  author =       "Massimiliano Cattafi and Marco Gavanelli and Michela
                 Milano and Paolo Cagnoli",
  title =        "Sustainable biomass power plant location in the
                 {Italian Emilia-Romagna} region",
  journal =      j-TIST,
  volume =       "2",
  number =       "4",
  pages =        "33:1--33:??",
  month =        jul,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1989734.1989737",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Jul 22 08:50:59 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "33",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 2, no. 4 (July 2011); article 34. ISSN-L added for
%%% consistency with the v. 3 entries below.
@Article{Patnaik:2011:TDM,
  author =       "Debprakash Patnaik and Manish Marwah and Ratnesh K.
                 Sharma and Naren Ramakrishnan",
  title =        "Temporal data mining approaches for sustainable
                 chiller management in data centers",
  journal =      j-TIST,
  volume =       "2",
  number =       "4",
  pages =        "34:1--34:??",
  month =        jul,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1989734.1989738",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Jul 22 08:50:59 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "34",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 2, no. 4 (July 2011); article 35. ISSN-L added for
%%% consistency with the v. 3 entries below.
@Article{Ramchurn:2011:ABH,
  author =       "Sarvapali D. Ramchurn and Perukrishnen Vytelingum and
                 Alex Rogers and Nicholas R. Jennings",
  title =        "Agent-based homeostatic control for green energy in
                 the smart grid",
  journal =      j-TIST,
  volume =       "2",
  number =       "4",
  pages =        "35:1--35:??",
  month =        jul,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1989734.1989739",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Jul 22 08:50:59 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "35",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 2, no. 4 (July 2011); article 36. ISSN-L added for
%%% consistency with the v. 3 entries below.
@Article{Mithal:2011:MGF,
  author =       "Varun Mithal and Ashish Garg and Shyam Boriah and
                 Michael Steinbach and Vipin Kumar and Christopher
                 Potter and Steven Klooster and Juan Carlos
                 Castilla-Rubio",
  title =        "Monitoring global forest cover using data mining",
  journal =      j-TIST,
  volume =       "2",
  number =       "4",
  pages =        "36:1--36:??",
  month =        jul,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1989734.1989740",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Jul 22 08:50:59 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "36",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 2, no. 4 (July 2011); article 37. ISSN-L added for
%%% consistency with the v. 3 entries below.
@Article{Li:2011:MMM,
  author =       "Zhenhui Li and Jiawei Han and Ming Ji and Lu-An Tang
                 and Yintao Yu and Bolin Ding and Jae-Gil Lee and Roland
                 Kays",
  title =        "{MoveMine}: Mining moving object data for discovery of
                 animal movement patterns",
  journal =      j-TIST,
  volume =       "2",
  number =       "4",
  pages =        "37:1--37:??",
  month =        jul,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1989734.1989741",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Jul 22 08:50:59 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "37",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 2, no. 4 (July 2011); article 38. ISSN-L added for
%%% consistency with the v. 3 entries below.
@Article{Toole:2011:SCC,
  author =       "Jameson L. Toole and Nathan Eagle and Joshua B.
                 Plotkin",
  title =        "Spatiotemporal correlations in criminal offense
                 records",
  journal =      j-TIST,
  volume =       "2",
  number =       "4",
  pages =        "38:1--38:??",
  month =        jul,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1989734.1989742",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Jul 22 08:50:59 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "38",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 2, no. 4 (July 2011); article 39. ISSN-L added for
%%% consistency with the v. 3 entries below. Garbled author name
%%% ``Ricardo Ricardo'' corrected to ``Ricardo Vilalta'' per the ACM
%%% DL record for DOI 10.1145/1989734.1989743 --- verify against the
%%% published paper.
@Article{Ding:2011:SCD,
  author =       "Wei Ding and Tomasz F. Stepinski and Yang Mu and
                 Lourenco Bandeira and Ricardo Vilalta and Youxi Wu and
                 Zhenyu Lu and Tianyu Cao and Xindong Wu",
  title =        "Subkilometer crater discovery with boosting and
                 transfer learning",
  journal =      j-TIST,
  volume =       "2",
  number =       "4",
  pages =        "39:1--39:??",
  month =        jul,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1989734.1989743",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Jul 22 08:50:59 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "39",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 2, no. 4 (July 2011); article 40. ISSN-L added for
%%% consistency with the v. 3 entries below.
@Article{Berry:2011:PPA,
  author =       "Pauline M. Berry and Melinda Gervasio and Bart
                 Peintner and Neil Yorke-Smith",
  title =        "{PTIME}: Personalized assistance for calendaring",
  journal =      j-TIST,
  volume =       "2",
  number =       "4",
  pages =        "40:1--40:??",
  month =        jul,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1989734.1989744",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Jul 22 08:50:59 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "40",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 2, no. 4 (July 2011); article 41. ISSN-L added for
%%% consistency with the v. 3 entries below.
@Article{Reddy:2011:PSA,
  author =       "Sudhakar Y. Reddy and Jeremy D. Frank and Michael J.
                 Iatauro and Matthew E. Boyce and Elif K{\"u}rkl{\"u}
                 and Mitchell Ai-Chang and Ari K. J{\'o}nsson",
  title =        "Planning solar array operations on the {International
                 Space Station}",
  journal =      j-TIST,
  volume =       "2",
  number =       "4",
  pages =        "41:1--41:??",
  month =        jul,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1989734.1989745",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Jul 22 08:50:59 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "41",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 2, no. 4 (July 2011); article 42. ISSN-L added for
%%% consistency with the v. 3 entries below.
@Article{Haigh:2011:RLL,
  author =       "Karen Zita Haigh and Fusun Yaman",
  title =        "{RECYCLE}: Learning looping workflows from annotated
                 traces",
  journal =      j-TIST,
  volume =       "2",
  number =       "4",
  pages =        "42:1--42:??",
  month =        jul,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1989734.1989746",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Jul 22 08:50:59 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "42",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 1 (October 2011); article 1.
@Article{Guy:2011:I,
  author =       "Ido Guy and Li Chen and Michelle X. Zhou",
  title =        "Introduction",
  journal =      j-TIST,
  volume =       "3",
  number =       "1",
  pages =        "1:1--1:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2036264.2036265",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun Nov 6 07:22:40 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "1",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 1 (October 2011); article 2.
@Article{Lipczak:2011:ETR,
  author =       "Marek Lipczak and Evangelos Milios",
  title =        "Efficient Tag Recommendation for Real-Life Data",
  journal =      j-TIST,
  volume =       "3",
  number =       "1",
  pages =        "2:1--2:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2036264.2036266",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun Nov 6 07:22:40 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "2",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 1 (October 2011); article 3.
@Article{Vasuki:2011:SAR,
  author =       "Vishvas Vasuki and Nagarajan Natarajan and Zhengdong
                 Lu and Berkant Savas and Inderjit Dhillon",
  title =        "Scalable Affiliation Recommendation using Auxiliary
                 Networks",
  journal =      j-TIST,
  volume =       "3",
  number =       "1",
  pages =        "3:1--3:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2036264.2036267",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun Nov 6 07:22:40 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "3",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 1 (October 2011); article 4.
@Article{McNally:2011:CSC,
  author =       "Kevin McNally and Michael P. O'Mahony and Maurice
                 Coyle and Peter Briggs and Barry Smyth",
  title =        "A Case Study of Collaboration and Reputation in Social
                 {Web} Search",
  journal =      j-TIST,
  volume =       "3",
  number =       "1",
  pages =        "4:1--4:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2036264.2036268",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun Nov 6 07:22:40 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "4",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 1 (October 2011); article 5.
@Article{Zhao:2011:WDW,
  author =       "Shiwan Zhao and Michelle X. Zhou and Xiatian Zhang and
                 Quan Yuan and Wentao Zheng and Rongyao Fu",
  title =        "Who is Doing What and When: Social Map-Based
                 Recommendation for Content-Centric Social {Web} Sites",
  journal =      j-TIST,
  volume =       "3",
  number =       "1",
  pages =        "5:1--5:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2036264.2036269",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun Nov 6 07:22:40 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "5",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 1 (October 2011); article 6.
@Article{Liu:2011:I,
  author =       "Huan Liu and Dana Nau",
  title =        "Introduction",
  journal =      j-TIST,
  volume =       "3",
  number =       "1",
  pages =        "6:1--6:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2036264.2036270",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun Nov 6 07:22:40 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "6",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 1 (October 2011); article 7.
@Article{Shakarian:2011:GGA,
  author =       "Paulo Shakarian and V. S. Subrahmanian and Maria Luisa
                 Sapino",
  title =        "{GAPs}: Geospatial Abduction Problems",
  journal =      j-TIST,
  volume =       "3",
  number =       "1",
  pages =        "7:1--7:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2036264.2036271",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun Nov 6 07:22:40 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "7",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 1 (October 2011); article 8.
@Article{Gal:2011:AAN,
  author =       "Ya'akov Gal and Sarit Kraus and Michele Gelfand and
                 Hilal Khashan and Elizabeth Salmon",
  title =        "An Adaptive Agent for Negotiating with People in
                 Different Cultures",
  journal =      j-TIST,
  volume =       "3",
  number =       "1",
  pages =        "8:1--8:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2036264.2036272",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun Nov 6 07:22:40 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "8",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 1 (October 2011); article 9.
@Article{Vu:2011:FSK,
  author =       "Thuc Vu and Yoav Shoham",
  title =        "Fair Seeding in Knockout Tournaments",
  journal =      j-TIST,
  volume =       "3",
  number =       "1",
  pages =        "9:1--9:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2036264.2036273",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun Nov 6 07:22:40 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "9",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 1 (October 2011); article 10.
@Article{Cioffi-Revilla:2011:GIS,
  author =       "Claudio Cioffi-Revilla and J. Daniel Rogers and
                 Atesmachew Hailegiorgis",
  title =        "Geographic Information Systems and Spatial Agent-Based
                 Model Simulations for Sustainable Development",
  journal =      j-TIST,
  volume =       "3",
  number =       "1",
  pages =        "10:1--10:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2036264.2036274",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun Nov 6 07:22:40 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "10",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 1 (October 2011); article 11.
@Article{Jiang:2011:UMS,
  author =       "Yingying Jiang and Feng Tian and Xiaolong (Luke) Zhang
                 and Guozhong Dai and Hongan Wang",
  title =        "Understanding, Manipulating and Searching Hand-Drawn
                 Concept Maps",
  journal =      j-TIST,
  volume =       "3",
  number =       "1",
  pages =        "11:1--11:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2036264.2036275",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun Nov 6 07:22:40 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "11",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 1 (October 2011); article 12.
@Article{Wang:2011:IIS,
  author =       "Jingdong Wang and Xian-Sheng Hua",
  title =        "Interactive Image Search by Color Map",
  journal =      j-TIST,
  volume =       "3",
  number =       "1",
  pages =        "12:1--12:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2036264.2036276",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun Nov 6 07:22:40 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "12",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 1 (October 2011); article 13.
@Article{Prettenhofer:2011:CLA,
  author =       "Peter Prettenhofer and Benno Stein",
  title =        "Cross-Lingual Adaptation Using Structural
                 Correspondence Learning",
  journal =      j-TIST,
  volume =       "3",
  number =       "1",
  pages =        "13:1--13:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2036264.2036277",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun Nov 6 07:22:40 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "13",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 1 (October 2011); article 14.
@Article{Anagnostopoulos:2011:WPS,
  author =       "Aris Anagnostopoulos and Andrei Z. Broder and Evgeniy
                 Gabrilovich and Vanja Josifovski and Lance Riedel",
  title =        "{Web} Page Summarization for Just-in-Time Contextual
                 Advertising",
  journal =      j-TIST,
  volume =       "3",
  number =       "1",
  pages =        "14:1--14:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2036264.2036278",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun Nov 6 07:22:40 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "14",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 1 (October 2011); article 15.
@Article{Tang:2011:GPU,
  author =       "Lei Tang and Xufei Wang and Huan Liu",
  title =        "Group Profiling for Understanding Social Structures",
  journal =      j-TIST,
  volume =       "3",
  number =       "1",
  pages =        "15:1--15:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2036264.2036279",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun Nov 6 07:22:40 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "15",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 1 (October 2011); article 16.
@Article{Liu:2011:TWC,
  author =       "Zhanyi Liu and Haifeng Wang and Hua Wu and Sheng Li",
  title =        "Two-Word Collocation Extraction Using Monolingual Word
                 Alignment Method",
  journal =      j-TIST,
  volume =       "3",
  number =       "1",
  pages =        "16:1--16:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2036264.2036280",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun Nov 6 07:22:40 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "16",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 1 (October 2011); article 17.
@Article{Liao:2011:MCS,
  author =       "Zhen Liao and Daxin Jiang and Enhong Chen and Jian Pei
                 and Huanhuan Cao and Hang Li",
  title =        "Mining Concept Sequences from Large-Scale Search Logs
                 for Context-Aware Query Suggestion",
  journal =      j-TIST,
  volume =       "3",
  number =       "1",
  pages =        "17:1--17:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2036264.2036281",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun Nov 6 07:22:40 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "17",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 1 (October 2011); article 18.
@Article{Sukthankar:2011:ARD,
  author =       "Gita Sukthankar and Katia Sycara",
  title =        "Activity Recognition for Dynamic Multi-Agent Teams",
  journal =      j-TIST,
  volume =       "3",
  number =       "1",
  pages =        "18:1--18:??",
  month =        oct,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2036264.2036282",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun Nov 6 07:22:40 MST 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "18",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 2 (February 2012); article 19.
@Article{Liu:2012:ISS,
  author =       "Shixia Liu and Michelle X. Zhou and Giuseppe Carenini
                 and Huamin Qu",
  title =        "Introduction to the Special Section on Intelligent
                 Visual Interfaces for Text Analysis",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "19:1--19:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089095",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "19",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST v. 3, no. 2 (February 2012); article 20.
@Article{Cui:2012:WSU,
  author =       "Weiwei Cui and Huamin Qu and Hong Zhou and Wenbin
                 Zhang and Steve Skiena",
  title =        "Watch the Story Unfold with {TextWheel}: Visualization
                 of Large-Scale News Streams",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "20:1--20:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089096",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Keyword-based searching and clustering of news
                 articles have been widely used for news analysis.
                 However, news articles usually have other attributes
                 such as source, author, date and time, length, and
                 sentiment which should be taken into account. In
                 addition, news articles and keywords have complicated
                 macro/micro relations, which include relations between
                 news articles (i.e., macro relation), relations between
                 keywords (i.e., micro relation), and relations between
                 news articles and keywords (i.e., macro-micro
                 relation). These macro/micro relations are time varying
                 and pose special challenges for news analysis. In this
                 article we present a visual analytics system for news
                 streams which can bring multiple attributes of the news
                 articles and the macro/micro relations between news
                 streams and keywords into one coherent analytical
                 context, all the while conveying the dynamic natures of
                 news streams. We introduce a new visualization
                 primitive called TextWheel which consists of one or
                 multiple keyword wheels, a document transportation
                 belt, and a dynamic system which connects the wheels
                 and belt. By observing the TextWheel and its content
                 changes, some interesting patterns can be detected. We
                 use our system to analyze several news corpora related
                 to some major companies and the results demonstrate the
                 high potential of our method.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "20",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 3(2), Feb 2012, article 21: visual abstraction and ordering in
%%% faceted browsing of text collections.
@Article{Thai:2012:VAO,
  author =       "Vinhtuan Thai and Pierre-Yves Rouille and Siegfried
                 Handschuh",
  title =        "Visual Abstraction and Ordering in Faceted Browsing of
                 Text Collections",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "21:1--21:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089097",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Faceted navigation is a technique for the exploration
                 and discovery of a collection of resources, which can
                 be of various types including text documents. While
                 being information-rich resources, documents are usually
                 not treated as content-bearing items in faceted
                 browsing interfaces, and yet the required clean
                 metadata is not always available or matches users'
                 interest. In addition, the existing linear listing
                 paradigm for representing result items from the faceted
                 filtering process makes it difficult for users to
                 traverse or compare across facet values in different
                 orders of importance to them. In this context, we
                 report in this article a visual support toward faceted
                 browsing of a collection of documents based on a set of
                 entities of interest to users. Our proposed approach
                 involves using a multi-dimensional visualization as an
                 alternative to the linear listing of focus items. In
                 this visualization, visual abstraction based on a
                 combination of a conceptual structure and the
                 structural equivalence of documents can be
                 simultaneously used to deal with a large number of
                 items. Furthermore, the approach also enables visual
                 ordering based on the importance of facet values to
                 support prioritized, cross-facet comparisons of focus
                 items. A user study was conducted and the results
                 suggest that interfaces using the proposed approach can
                 support users better in exploratory tasks and were also
                 well-liked by the participants of the study, with the
                 hybrid interface combining the multi-dimensional
                 visualization with the linear listing receiving the
                 most favorable ratings.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "21",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 3(2), Feb 2012, article 22: parallel hierarchical coordinates
%%% for text-corpus visualization.
%%% NOTE(review): removed stray space in abstract, "(or PhC )" -> "(or PhC)".
@Article{Candan:2012:PMV,
  author =       "K. Sel{\c{c}}uk Candan and Luigi {Di Caro} and Maria
                 Luisa Sapino",
  title =        "{PhC}: Multiresolution Visualization and Exploration
                 of Text Corpora with Parallel Hierarchical
                 Coordinates",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "22:1--22:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089098",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The high-dimensional nature of the textual data
                 complicates the design of visualization tools to
                 support exploration of large document corpora. In this
                 article, we first argue that the Parallel Coordinates
                 (PC) technique, which can map multidimensional vectors
                 onto a 2D space in such a way that elements with
                 similar values are represented as similar poly-lines or
                 curves in the visualization space, can be used to help
                 users discern patterns in document collections. The
                 inherent reduction in dimensionality during the mapping
                 from multidimensional points to 2D lines, however, may
                 result in visual complications. For instance, the lines
                 that correspond to clusters of objects that are
                 separate in the multidimensional space may overlap each
                 other in the 2D space; the resulting increase in the
                 number of crossings would make it hard to distinguish
                 the individual document clusters. Such crossings of
                 lines and overly dense regions are significant sources
                 of visual clutter, thus avoiding them may help
                 interpret the visualization. In this article, we note
                 that visual clutter can be significantly reduced by
                 adjusting the resolution of the individual term
                 coordinates by clustering the corresponding values.
                 Such reductions in the resolution of the individual
                 term-coordinates, however, will lead to a certain
                 degree of information loss and thus the appropriate
                 resolution for the term-coordinates has to be selected
                 carefully. Thus, in this article we propose a
                 controlled clutter reduction approach, called Parallel
                 hierarchical Coordinates (or PhC), for reducing the
                 visual clutter in PC-based visualizations of text
                 corpora. We define visual clutter and information loss
                 measures and provide extensive evaluations that show
                 that the proposed PhC provides significant visual gains
                 (i.e., multiple orders of reductions in visual clutter)
                 with small information loss during visualization and
                 exploration of document collections.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "22",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 3(2), Feb 2012, article 23: TopicNets, topic-model-based visual
%%% analysis of large text corpora.
@Article{Gretarsson:2012:TVA,
  author =       "Brynjar Gretarsson and John O'Donovan and Svetlin
                 Bostandjiev and Tobias H{\"o}llerer and Arthur Asuncion
                 and David Newman and Padhraic Smyth",
  title =        "{TopicNets}: Visual Analysis of Large Text Corpora
                 with Topic Modeling",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "23:1--23:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089099",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "We present TopicNets, a Web-based system for visual
                 and interactive analysis of large sets of documents
                 using statistical topic models. A range of
                 visualization types and control mechanisms to support
                 knowledge discovery are presented. These include
                 corpus- and document-specific views, iterative topic
                 modeling, search, and visual filtering. Drill-down
                 functionality is provided to allow analysts to
                 visualize individual document sections and their
                 relations within the global topic space. Analysts can
                 search across a dataset through a set of expansion
                 techniques on selected document and topic nodes.
                 Furthermore, analysts can select relevant subsets of
                 documents and perform real-time topic modeling on these
                 subsets to interactively visualize topics at various
                 levels of granularity, allowing for a better
                 understanding of the documents. A discussion of the
                 design and implementation choices for each visual
                 analysis technique is presented. This is followed by a
                 discussion of three diverse use cases in which
                 TopicNets enables fast discovery of information that is
                 otherwise hard to find. These include a corpus of
                 50,000 successful NSF grant proposals, 10,000
                 publications from a large research center, and single
                 documents including a grant proposal and a PhD
                 thesis.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "23",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 3(2), Feb 2012, article 24: DClusterE, visual evaluation of
%%% document clustering.
%%% NOTE(review): capitalized "A Framework" after the colon for consistency
%%% with the Title Case used by sibling entries in this file.
@Article{Zhang:2012:DFE,
  author =       "Yi Zhang and Tao Li",
  title =        "{DClusterE}: {A} Framework for Evaluating and
                 Understanding Document Clustering Using Visualization",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "24:1--24:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089100",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Over the last decade, document clustering, as one of
                 the key tasks in information organization and
                 navigation, has been widely studied. Many algorithms
                 have been developed for addressing various challenges
                 in document clustering and for improving clustering
                 performance. However, relatively few research efforts
                 have been reported on evaluating and understanding
                 document clustering results. In this article, we
                 present DClusterE, a comprehensive and effective
                 framework for document clustering evaluation and
                 understanding using information visualization.
                 DClusterE integrates cluster validation with user
                 interactions and offers rich visualization tools for
                 users to examine document clustering results from
                 multiple perspectives. In particular, through
                 informative views including force-directed layout view,
                 matrix view, and cluster view, DClusterE provides not
                 only different aspects of document
                 inter/intra-clustering structures, but also the
                 corresponding relationship between clustering results
                 and the ground truth. Additionally, DClusterE supports
                 general user interactions such as zoom in/out,
                 browsing, and interactive access of the documents at
                 different levels. Two new techniques are proposed to
                 implement DClusterE: (1) A novel multiplicative update
                 algorithm (MUA) for matrix reordering to generate
                 narrow-banded (or clustered) nonzero patterns from
                 documents. Combined with coarse seriation, MUA is able
                 to provide better visualization of the cluster
                 structures. (2) A Mallows-distance-based algorithm for
                 establishing the relationship between the clustering
                 results and the ground truth, which serves as the basis
                 for coloring schemes. Experiments and user studies are
                 conducted to demonstrate the effectiveness and
                 efficiency of DClusterE.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "24",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 3(2), Feb 2012, article 25: TIARA, interactive topic-based
%%% visual text summarization.
@Article{Liu:2012:TIT,
  author =       "Shixia Liu and Michelle X. Zhou and Shimei Pan and
                 Yangqiu Song and Weihong Qian and Weijia Cai and
                 Xiaoxiao Lian",
  title =        "{TIARA}: Interactive, Topic-Based Visual Text
                 Summarization and Analysis",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "25:1--25:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089101",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "We are building an interactive visual text analysis
                 tool that aids users in analyzing large collections of
                 text. Unlike existing work in visual text analytics,
                 which focuses either on developing sophisticated text
                 analytic techniques or inventing novel text
                 visualization metaphors, ours tightly integrates
                 state-of-the-art text analytics with interactive
                 visualization to maximize the value of both. In this
                 article, we present our work from two aspects. We first
                 introduce an enhanced, LDA-based topic analysis
                 technique that automatically derives a set of topics to
                 summarize a collection of documents and their content
                 evolution over time. To help users understand the
                 complex summarization results produced by our topic
                 analysis technique, we then present the design and
                 development of a time-based visualization of the
                 results. Furthermore, we provide users with a set of
                 rich interaction tools that help them further interpret
                 the visualized results in context and examine the text
                 collection from multiple perspectives. As a result, our
                 work offers three unique contributions. First, we
                 present an enhanced topic modeling technique to provide
                 users with a time-sensitive and more meaningful text
                 summary. Second, we develop an effective visual
                 metaphor to transform abstract and often complex text
                 summarization results into a comprehensible visual
                 representation. Third, we offer users flexible visual
                 interaction tools as alternatives to compensate for the
                 deficiencies of current text summarization techniques.
                 We have applied our work to a number of text corpora
                 and our evaluation shows promise, especially in support
                 of complex text analyses.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "25",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 3(2), Feb 2012, article 26: feature-based visual sentiment
%%% analysis of text document streams.
@Article{Rohrdantz:2012:FBV,
  author =       "Christian Rohrdantz and Ming C. Hao and Umeshwar Dayal
                 and Lars-Erik Haug and Daniel A. Keim",
  title =        "Feature-Based Visual Sentiment Analysis of Text
                 Document Streams",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "26:1--26:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089102",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "This article describes automatic methods and
                 interactive visualizations that are tightly coupled
                 with the goal to enable users to detect interesting
                 portions of text document streams. In this scenario the
                 interestingness is derived from the sentiment, temporal
                 density, and context coherence that comments about
                 features for different targets (e.g., persons,
                 institutions, product attributes, topics, etc.) have.
                 Contributions are made at different stages of the
                 visual analytics pipeline, including novel ways to
                 visualize salient temporal accumulations for further
                 exploration. Moreover, based on the visualization, an
                 automatic algorithm aims to detect and preselect
                 interesting time interval patterns for different
                 features in order to guide analysts. The main target
                 group for the suggested methods are business analysts
                 who want to explore time-stamped customer feedback to
                 detect critical issues. Finally, application case
                 studies on two different datasets and scenarios are
                 conducted and an extensive evaluation is provided for
                 the presented intelligent visual interface for
                 feature-based sentiment exploration over time.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "26",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 3(2), Feb 2012, article 27: editorial introduction to the ACML
%%% 2010 special section (no abstract, as expected for an editorial).
@Article{Sugiyama:2012:ISS,
  author =       "Masashi Sugiyama and Qiang Yang",
  title =        "Introduction to the Special Section on the {2nd Asia
                 Conference on Machine Learning (ACML 2010)}",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "27:1--27:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089103",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "27",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 3(2), Feb 2012, article 28: conceptual imitation learning in
%%% human-robot interaction.
@Article{Hajimirsadeghi:2012:CIL,
  author =       "Hossein Hajimirsadeghi and Majid Nili Ahmadabadi and
                 Babak Nadjar Araabi and Hadi Moradi",
  title =        "Conceptual Imitation Learning in a Human-Robot
                 Interaction Paradigm",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "28:1--28:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089104",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In general, imitation is imprecisely used to address
                 different levels of social learning from high-level
                 knowledge transfer to low-level regeneration of motor
                 commands. However, true imitation is based on
                 abstraction and conceptualization. This article
                 presents a model for conceptual imitation through
                 interaction with the teacher to abstract
                 spatio-temporal demonstrations based on their
                 functional meaning. Abstraction, concept acquisition,
                 and self-organization of proto-symbols are performed
                 through an incremental and gradual learning algorithm.
                 In this algorithm, Hidden Markov Models (HMMs) are used
                 to abstract perceptually similar demonstrations.
                 However, abstract (relational) concepts emerge as a
                 collection of HMMs irregularly scattered in the
                 perceptual space but showing the same functionality.
                 Performance of the proposed algorithm is evaluated in
                 two experimental scenarios. The first one is a
                 human-robot interaction task of imitating signs
                 produced by hand movements. The second one is a
                 simulated interactive task of imitating whole body
                 motion patterns of a humanoid model. Experimental
                 results show efficiency of our model for concept
                 extraction, proto-symbol emergence, motion pattern
                 recognition, prediction, and generation.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "28",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 3(2), Feb 2012, article 29: REDLLA, mining recurring concept
%%% drifts with limited labeled streaming data.
%%% NOTE(review): fixed extraction artifact "k -means" -> "k-means" in the
%%% abstract (spurious space).
@Article{Li:2012:MRC,
  author =       "Peipei Li and Xindong Wu and Xuegang Hu",
  title =        "Mining Recurring Concept Drifts with Limited Labeled
                 Streaming Data",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "29:1--29:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089105",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Tracking recurring concept drifts is a significant
                 issue for machine learning and data mining that
                 frequently appears in real-world stream classification
                 problems. It is a challenge for many streaming
                 classification algorithms to learn recurring concepts
                 in a data stream environment with unlabeled data, and
                 this challenge has received little attention from the
                 research community. Motivated by this challenge, this
                 article focuses on the problem of recurring contexts in
                 streaming environments with limited labeled data. We
                 propose a semi-supervised classification algorithm for
                 data streams with REcurring concept Drifts and Limited
                 LAbeled data, called REDLLA, in which a decision tree
                 is adopted as the classification model. When growing a
                 tree, a clustering algorithm based on k-means is
                 installed to produce concept clusters and unlabeled
                 data are labeled in the method of majority-class at
                 leaves. In view of deviations between history and new
                 concept clusters, potential concept drifts are
                 distinguished and recurring concepts are maintained.
                 Extensive studies on both synthetic and real-world data
                 confirm the advantages of our REDLLA algorithm over
                 three state-of-the-art online classification algorithms
                 of CVFDT, DWCDS, and CDRDT and several known online
                 semi-supervised algorithms, even in the case with more
                 than 90\% unlabeled data.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "29",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 3(2), Feb 2012, article 30: ensembles of restricted Hoeffding
%%% trees for data-stream classification.
%%% NOTE(review): repaired ungrammatical abstract text "and also use to
%%% reset" -> "and also use it to reset" (dropped object; "it" refers to
%%% the change detection method).
@Article{Bifet:2012:ERH,
  author =       "Albert Bifet and Eibe Frank and Geoff Holmes and
                 Bernhard Pfahringer",
  title =        "Ensembles of Restricted {Hoeffding} Trees",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "30:1--30:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089106",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The success of simple methods for classification shows
                 that it is often not necessary to model complex
                 attribute interactions to obtain good classification
                 accuracy on practical problems. In this article, we
                 propose to exploit this phenomenon in the data stream
                 context by building an ensemble of Hoeffding trees that
                 are each limited to a small subset of attributes. In
                 this way, each tree is restricted to model interactions
                 between attributes in its corresponding subset. Because
                 it is not known a priori which attribute subsets are
                 relevant for prediction, we build exhaustive ensembles
                 that consider all possible attribute subsets of a given
                 size. As the resulting Hoeffding trees are not all
                 equally important, we weigh them in a suitable manner
                 to obtain accurate classifications. This is done by
                 combining the log-odds of their probability estimates
                 using sigmoid perceptrons, with one perceptron per
                 class. We propose a mechanism for setting the
                 perceptrons' learning rate using the change detection
                 method for data streams, and also use it to reset ensemble
                 members (i.e., Hoeffding trees) when they no longer
                 perform well. Our experiments show that the resulting
                 ensemble classifier outperforms bagging for data
                 streams in terms of accuracy when both are used in
                 conjunction with adaptive naive Bayes Hoeffding trees,
                 at the expense of runtime and memory consumption. We
                 also show that our stacking method can improve the
                 performance of a bagged ensemble.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "30",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 3(2), Feb 2012, article 31: reliable people counting via
%%% multiple cameras.
@Article{Ma:2012:RPC,
  author =       "Huadong Ma and Chengbin Zeng and Charles X. Ling",
  title =        "A Reliable People Counting System via Multiple
                 Cameras",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "31:1--31:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089107",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Reliable and real-time people counting is crucial in
                 many applications. Most previous works can only count
                 moving people from a single camera, which cannot count
                 still people or can fail badly when there is a crowd
                 (i.e., heavy occlusion occurs). In this article, we
                 build a system for robust and fast people counting
                 under occlusion through multiple cameras. To improve
                 the reliability of human detection from a single
                 camera, we use a dimensionality reduction method on the
                 multilevel edge and texture features to handle the
                 large variations in human appearance and poses. To
                 accelerate the detection speed, we propose a novel
                 two-stage cascade-of-rejectors method. To handle the
                 heavy occlusion in crowded scenes, we present a fusion
                 method with error tolerance to combine human detection
                 from multiple cameras. To improve the speed and
                 accuracy of moving people counting, we combine our
                 multiview fusion detection method with particle
                 tracking to count the number of people moving in/out
                 the camera view (`border control'). Extensive
                 experiments and analyses show that our method
                 outperforms state-of-the-art techniques in single- and
                 multicamera datasets for both speed and reliability. We
                 also design a deployed system for fast and reliable
                 people (still or moving) counting by using multiple
                 cameras.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "31",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 3(2), Feb 2012, article 32: fuzzy logic system for bargaining
%%% in information markets.
@Article{Kolomvatsos:2012:FLS,
  author =       "Kostas Kolomvatsos and Christos Anagnostopoulos and
                 Stathes Hadjiefthymiades",
  title =        "A Fuzzy Logic System for Bargaining in Information
                 Markets",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "32:1--32:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089108",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Future Web business models involve virtual
                 environments where entities interact in order to sell
                 or buy information goods. Such environments are known
                 as Information Markets (IMs). Intelligent agents are
                 used in IMs for representing buyers or information
                 providers (sellers). We focus on the decisions taken by
                 the buyer in the purchase negotiation process with
                 sellers. We propose a reasoning mechanism on the offers
                 (prices of information goods) issued by sellers based
                 on fuzzy logic. The buyer's knowledge on the
                 negotiation process is modeled through fuzzy sets. We
                 propose a fuzzy inference engine dealing with the
                 decisions that the buyer takes on each stage of the
                 negotiation process. The outcome of the proposed
                 reasoning method indicates whether the buyer should
                 accept or reject the sellers' offers. Our findings are
                 very promising for the efficiency of automated
                 transactions undertaken by intelligent agents.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "32",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Shi:2012:BMA,
  author =       "Lixin Shi and Yuhang Zhao and Jie Tang",
  title =        "Batch Mode Active Learning for Networked Data",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "33:1--33:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089109",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "We study a novel problem of batch mode active learning
                 for networked data. In this problem, data instances are
                 connected with links and their labels are correlated
                 with each other, and the goal of batch mode active
                 learning is to exploit the link-based dependencies and
                 node-specific content information to actively select a
                 batch of instances to query the user for learning an
                 accurate model to label unknown instances in the
                 network. We present three criteria (i.e., minimum
                 redundancy, maximum uncertainty, and maximum impact) to
                 quantify the informativeness of a set of instances, and
                 formalize the batch mode active learning problem as
                 selecting a set of instances by maximizing an objective
                 function which combines both link and content
                 information. As solving the objective function is
                 NP-hard, we present an efficient algorithm to optimize
                 the objective function with a bounded approximation
                 rate. To scale to real large networks, we develop a
                 parallel implementation of the algorithm. Experimental
                 results on both synthetic datasets and real-world
                 datasets demonstrate the effectiveness and efficiency
                 of our approach.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "33",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Shakarian:2012:AGA,
  author =       "Paulo Shakarian and John P. Dickerson and V. S.
                 Subrahmanian",
  title =        "Adversarial Geospatial Abduction Problems",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "34:1--34:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089110",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Geospatial Abduction Problems (GAPs) involve the
                 inference of a set of locations that `best explain' a
                 given set of locations of observations. For example,
                 the observations might include locations where a serial
                 killer committed murders or where insurgents carried
                 out Improvised Explosive Device (IED) attacks. In both
                 these cases, we would like to infer a set of locations
                 that explain the observations, for example, the set of
                 locations where the serial killer lives/works, and the
                 set of locations where insurgents locate weapons
                 caches. However, unlike all past work on abduction,
                 there is a strong adversarial component to this; an
                 adversary actively attempts to prevent us from
                 discovering such locations. We formalize such abduction
                 problems as a two-player game where both players (an
                 `agent' and an `adversary') use a probabilistic model
                 of their opponent (i.e., a mixed strategy). There is
                 asymmetry as the adversary can choose both the
                 locations of the observations and the locations of the
                 explanation, while the agent (i.e., us) tries to
                 discover these. In this article, we study the problem
                 from the point of view of both players. We define
                 reward functions axiomatically to capture the
                 similarity between two sets of explanations (one
                 corresponding to the locations chosen by the adversary,
                 one guessed by the agent). Many different reward
                 functions can satisfy our axioms. We then formalize the
                 Optimal Adversary Strategy (OAS) problem and the
                 Maximal Counter-Adversary strategy (MCA) and show that
                 both are NP-hard, that their associated counting
                 complexity problems are \#P-hard, and that MCA has no
                 fully polynomial approximation scheme unless P=NP. We
                 show that approximation guarantees are possible for MCA
                 when the reward function satisfies two simple
                 properties (zero-starting and monotonicity) which many
                 natural reward functions satisfy. We develop a mixed
                 integer linear programming algorithm to solve OAS and
                 two algorithms to (approximately) compute MCA; the
                 algorithms yield different approximation guarantees and
                 one algorithm assumes a monotonic reward function. Our
                 experiments use real data about IED attacks over a
                 21-month period in Baghdad. We are able to show that
                 both the MCA algorithms work well in practice; while
                 MCA-GREEDY-MONO is both highly accurate and slightly
                 faster than MCA-LS, MCA-LS (to our surprise) always
                 completely and correctly maximized the expected benefit
                 to the agent while running in an acceptable time
                 period.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "34",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Li:2012:LIS,
  author =       "Xueying Li and Huanhuan Cao and Enhong Chen and Jilei
                 Tian",
  title =        "Learning to Infer the Status of Heavy-Duty Sensors for
                 Energy-Efficient Context-Sensing",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "35:1--35:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089111",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "With the prevalence of smart mobile devices with
                 multiple sensors, the commercial application of
                 intelligent context-aware services becomes more and
                 more attractive. However, limited by the battery
                 capacity, the energy efficiency of context-sensing is
                 the bottleneck for the success of context-aware
                 applications. Though several previous studies for
                 energy-efficient context-sensing have been reported,
                 none of them can be applied to multiple types of
                 high-energy-consuming sensors. Moreover, applying
                 machine learning technologies to energy-efficient
                 context-sensing is underexplored too. In this article,
                 we propose to leverage machine learning technologies
                 for improving the energy efficiency of multiple
                 high-energy-consuming context sensors by trading off
                 the sensing accuracy. To be specific, we try to infer
                 the status of high-energy-consuming sensors according
                 to the outputs of software-based sensors and the
                 physical sensors that are necessary to work all the
                 time for supporting the basic functions of mobile
                 devices. If the inference indicates the
                 high-energy-consuming sensor is in a stable status, we
                 avoid the unnecessary invocation and instead use the
                 latest invoked value as the estimation. The
                 experimental results on real datasets show that the
                 energy efficiency of GPS sensing and audio-level
                 sensing are significantly improved by the proposed
                 approach while the sensing accuracy is over 90\%.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "35",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2012:AKR,
  author =       "Weinan Zhang and Dingquan Wang and Gui-Rong Xue and
                 Hongyuan Zha",
  title =        "Advertising Keywords Recommendation for Short-Text
                 {Web} Pages Using {Wikipedia}",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "36:1--36:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089112",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Advertising keywords recommendation is an
                 indispensable component for online advertising with the
                 keywords selected from the target Web pages used for
                 contextual advertising or sponsored search. Several
                 ranking-based algorithms have been proposed for
                 recommending advertising keywords. However, for most of
                 them performance is still lacking, especially when
                 dealing with short-text target Web pages, that is,
                 those containing insufficient textual information for
                 ranking. In some cases, short-text Web pages may not
                 even contain enough keywords for selection. A natural
                 alternative is then to recommend relevant keywords not
                 present in the target Web pages. In this article, we
                 propose a novel algorithm for advertising keywords
                 recommendation for short-text Web pages by leveraging
                 the contents of Wikipedia, a user-contributed online
                 encyclopedia. Wikipedia contains numerous entities with
                 related entities on a topic linked to each other. Given
                 a target Web page, we propose to use a content-biased
                 PageRank on the Wikipedia graph to rank the related
                 entities. Furthermore, in order to recommend
                 high-quality advertising keywords, we also add an
                 advertisement-biased factor into our model. With these
                 two biases, advertising keywords that are both relevant
                 to a target Web page and valuable for advertising are
                 recommended. In our experiments, several
                 state-of-the-art approaches for keyword recommendation
                 are compared. The experimental results demonstrate that
                 our proposed approach produces substantial improvement
                 in the precision of the top 20 recommended keywords on
                 short-text Web pages over existing approaches.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "36",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhou:2012:LAD,
  author =       "Ke Zhou and Jing Bai and Hongyuan Zha and Gui-Rong
                 Xue",
  title =        "Leveraging Auxiliary Data for Learning to Rank",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "37:1--37:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089113",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In learning to rank, both the quality and quantity of
                 the training data have significant impacts on the
                 performance of the learned ranking functions. However,
                 in many applications, there are usually not sufficient
                 labeled training data for the construction of an
                 accurate ranking model. It is therefore desirable to
                 leverage existing training data from other tasks when
                 learning the ranking function for a particular task, an
                 important problem which we tackle in this article
                 utilizing a boosting framework with transfer learning.
                 In particular, we propose to adaptively learn
                 transferable representations called super-features from
                 the training data of both the target task and the
                 auxiliary task. Those super-features and the
                 coefficients for combining them are learned in an
                 iterative stage-wise fashion. Unlike previous transfer
                 learning methods, the super-features can be adaptively
                 learned by weak learners from the data. Therefore, the
                 proposed framework is sufficiently flexible to deal
                 with complicated common structures among different
                 learning tasks. We evaluate the performance of the
                 proposed transfer learning method for two datasets from
                 the Letor collection and one dataset collected from a
                 commercial search engine, and we also compare our
                 methods with several existing transfer learning
                 methods. Our results demonstrate that the proposed
                 method can enhance the ranking functions of the target
                 tasks utilizing the training data from the auxiliary
                 tasks.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "37",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Peng:2012:MVC,
  author =       "Wei Peng and Tong Sun and Shriram Revankar and Tao
                 Li",
  title =        "Mining the {``Voice} of the Customer'' for Business
                 Prioritization",
  journal =      j-TIST,
  volume =       "3",
  number =       "2",
  pages =        "38:1--38:??",
  month =        feb,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2089094.2089114",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 16 15:10:10 MDT 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "To gain competitiveness and sustained growth in the
                 21st century, most businesses are on a mission to
                 become more customer-centric. In order to succeed in
                 this endeavor, it is crucial not only to synthesize and
                 analyze the VOC (the VOice of the Customer) data (i.e.,
                 the feedbacks or requirements raised by customers), but
                 also to quickly turn these data into actionable
                 knowledge. Although there are many technologies being
                 developed in this complex problem space, most existing
                 approaches in analyzing customer requests are ad hoc,
                 time-consuming, error-prone, people-based processes
                 which hardly scale well as the quantity of customer
                 information explodes. This often results in the slow
                 response to customer requests. In this article, in
                 order to mine VOC to extract useful knowledge for the
                 best product or service quality, we develop a hybrid
                 framework that integrates domain knowledge with
                 data-driven approaches to analyze the semi-structured
                 customer requests. The framework consists of capturing
                 functional features, discovering the overlap or
                 correlation among the features, and identifying the
                 evolving feature trend by using the knowledge
                 transformation model. In addition, since understanding
                 the relative importance of the individual customer
                 request is very critical and has a direct impact on the
                 effective prioritization in the development process, we
                 develop a novel semantic enhanced link-based ranking
                 (SELRank) algorithm for relatively rating/ranking both
                 customer requests and products. The framework has been
                 successfully applied on Xerox Office Group Feature
                 Enhancement Requirements (XOG FER) datasets to analyze
                 customer requests.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "38",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Hua:2012:ISS,
  author =       "Xian-Sheng Hua and Qi Tian and Alberto {Del Bimbo} and
                 Ramesh Jain",
  title =        "Introduction to the {Special Section on Intelligent
                 Multimedia Systems and Technology Part II}",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "39:1--39:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168753",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "39",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Yang:2012:MRM,
  author =       "Yi-Hsuan Yang and Homer H. Chen",
  title =        "Machine Recognition of Music Emotion: {A} Review",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "40:1--40:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168754",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The proliferation of MP3 players and the exploding
                 amount of digital music content call for novel ways of
                 music organization and retrieval to meet the
                 ever-increasing demand for easy and effective
                 information access. As almost every music piece is
                 created to convey emotion, music organization and
                 retrieval by emotion is a reasonable way of accessing
                 music information. A good deal of effort has been made
                 in the music information retrieval community to train a
                 machine to automatically recognize the emotion of a
                 music signal. A central issue of machine recognition of
                 music emotion is the conceptualization of emotion and
                 the associated emotion taxonomy. Different viewpoints
                 on this issue have led to the proposal of different
                 ways of emotion annotation, model training, and result
                 visualization. This article provides a comprehensive
                 review of the methods that have been proposed for music
                 emotion recognition. Moreover, as music emotion
                 recognition is still in its infancy, there are many
                 open issues. We review the solutions that have been
                 proposed to address these issues and conclude with
                 suggestions for further research.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "40",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Ewerth:2012:RVC,
  author =       "Ralph Ewerth and Markus M{\"u}hling and Bernd
                 Freisleben",
  title =        "Robust Video Content Analysis via Transductive
                 Learning",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "41:1--41:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168755",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Reliable video content analysis is an essential
                 prerequisite for effective video search. An important
                 current research question is how to develop robust
                 video content analysis methods that produce
                 satisfactory results for a large variety of video
                 sources, distribution platforms, genres, and content.
                 The work presented in this article exploits the
                 observation that the appearance of objects and events
                 is often related to a particular video sequence,
                 episode, program, or broadcast. This motivates our idea
                 of considering the content analysis task for a single
                 video or episode as a transductive setting: the final
                 classification model must be optimal for the given
                 video only, and not in general, as expected for
                 inductive learning. For this purpose, the unlabeled
                 video test data have to be used in the learning
                 process. In this article, a transductive learning
                 framework for robust video content analysis based on
                 feature selection and ensemble classification is
                 presented. In contrast to related transductive
                 approaches for video analysis (e.g., for concept
                 detection), the framework is designed in a general
                 manner and not only for a single task. The proposed
                 framework is applied to the following video analysis
                 tasks: shot boundary detection, face recognition,
                 semantic video retrieval, and semantic indexing of
                 computer game sequences. Experimental results for
                 diverse video analysis tasks and large test sets
                 demonstrate that the proposed transductive framework
                 improves the robustness of the underlying
                 state-of-the-art approaches, whereas transductive
                 support vector machines do not solve particular tasks
                 in a satisfactory manner.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "41",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Suk:2012:VHM,
  author =       "Myunghoon Suk and Ashok Ramadass and Yohan Jin and B.
                 Prabhakaran",
  title =        "Video Human Motion Recognition Using a Knowledge-Based
                 Hybrid Method Based on a Hidden {Markov} Model",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "42:1--42:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168756",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Human motion recognition in video data has several
                 interesting applications in fields such as gaming,
                 senior/assisted-living environments, and surveillance.
                 In these scenarios, we may have to consider adding new
                 motion classes (i.e., new types of human motions to be
                 recognized), as well as new training data (e.g., for
                 handling different type of subjects). Hence, both the
                 accuracy of classification and training time for the
                 machine learning algorithms become important
                 performance parameters in these cases. In this article,
                 we propose a knowledge-based hybrid (KBH) method that
                 can compute the probabilities for hidden Markov models
                 (HMMs) associated with different human motion classes.
                 This computation is facilitated by appropriately mixing
                 features from two different media types (3D motion
                 capture and 2D video). We conducted a variety of
                 experiments comparing the proposed KBH for HMMs and the
                 traditional Baum-Welch algorithms. With the advantage
                 of computing the HMM parameter in a noniterative
                 manner, the KBH method outperforms the Baum-Welch
                 algorithm both in terms of accuracy as well as in
                 reduced training time. Moreover, we show in additional
                 experiments that the KBH method also outperforms the
                 linear support vector machine (SVM).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "42",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2012:RVT,
  author =       "Shengping Zhang and Hongxun Yao and Xin Sun and
                 Shaohui Liu",
  title =        "Robust Visual Tracking Using an Effective Appearance
                 Model Based on Sparse Coding",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "43:1--43:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168757",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Intelligent video surveillance is currently one of the
                 most active research topics in computer vision,
                 especially when facing the explosion of video data
                 captured by a large number of surveillance cameras. As
                 a key step of an intelligent surveillance system,
                 robust visual tracking is very challenging for computer
                 vision. However, it is a basic functionality of the
                 human visual system (HVS). Psychophysical findings have
                 shown that the receptive fields of simple cells in the
                 visual cortex can be characterized as being spatially
                 localized, oriented, and bandpass, and it forms a
                 sparse, distributed representation of natural images.
                 In this article, motivated by these findings, we
                 propose an effective appearance model based on sparse
                 coding and apply it in visual tracking. Specifically,
                 we consider the responses of general basis functions
                 extracted by independent component analysis on a large
                 set of natural image patches as features and model the
                 appearance of the tracked target as the probability
                 distribution of these features. In order to make the
                 tracker more robust to partial occlusion, camouflage
                 environments, pose changes, and illumination changes,
                 we further select features that are related to the
                 target based on an entropy-gain criterion and ignore
                 those that are not. The target is finally represented
                 by the probability distribution of those related
                 features. The target search is performed by minimizing
                 the Matusita distance between the distributions of the
                 target model and a candidate using Newton-style
                 iterations. The experimental results validate that the
                 proposed method is more robust and effective than three
                 state-of-the-art methods.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "43",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Ji:2012:CAS,
  author =       "Rongrong Ji and Hongxun Yao and Qi Tian and Pengfei Xu
                 and Xiaoshuai Sun and Xianming Liu",
  title =        "Context-Aware Semi-Local Feature Detector",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "44:1--44:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168758",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "How can interest point detectors benefit from
                  contextual cues? In this article, we introduce a
                 context-aware semi-local detector (CASL) framework to
                 give a systematic answer with three contributions: (1)
                 We integrate the context of interest points to
                 recurrently refine their detections. (2) This
                 integration boosts interest point detectors from the
                 traditionally local scale to a semi-local scale to
                 discover more discriminative salient regions. (3) Such
                 context-aware structure further enables us to bring
                 forward category learning (usually in the subsequent
                 recognition phase) into interest point detection to
                 locate category-aware, meaningful salient regions. Our
                 CASL detector consists of two phases. The first phase
                 accumulates multiscale spatial correlations of local
                 features into a difference of contextual Gaussians
                 (DoCG) field. DoCG quantizes detector context to
                 highlight contextually salient regions at a semi-local
                 scale, which also reveals visual attentions to a
                 certain extent. The second phase locates contextual
                 peaks by mean shift search over the DoCG field, which
                 subsequently integrates contextual cues into feature
                 description. This phase enables us to integrate
                 category learning into mean shift search kernels. This
                 learning-based CASL mechanism produces more
                 category-aware features, which substantially benefits
                 the subsequent visual categorization process. We
                 conducted experiments in image search, object
                 characterization, and feature detector repeatability
                 evaluations, which reported superior discriminability
                 and comparable repeatability to state-of-the-art
                 works.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "44",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Berretti:2012:DFF,
  author =       "Stefano Berretti and Alberto {Del Bimbo} and Pietro
                 Pala",
  title =        "Distinguishing Facial Features for Ethnicity-Based
                 {$3$D} Face Recognition",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "45:1--45:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168759",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Among different approaches for 3D face recognition,
                 solutions based on local facial characteristics are
                 very promising, mainly because they can manage facial
                 expression variations by assigning different weights to
                 different parts of the face. However, so far, a few
                 works have investigated the individual relevance that
                 local features play in 3D face recognition with very
                 simple solutions applied in the practice. In this
                 article, a local approach to 3D face recognition is
                 combined with a feature selection model to study the
                 relative relevance of different regions of the face for
                 the purpose of discriminating between different
                 subjects. The proposed solution is experimented using
                 facial scans of the Face Recognition Grand Challenge
                 dataset. Results of the experimentation are two-fold:
                 they quantitatively demonstrate the assumption that
                 different regions of the face have different relevance
                 for face discrimination and also show that the
                 relevance of facial regions changes for different
                 ethnic groups.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "45",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2012:GAS,
  author =       "Ning Zhang and Ling-Yu Duan and Lingfang Li and
                 Qingming Huang and Jun Du and Wen Gao and Ling Guan",
  title =        "A Generic Approach for Systematic Analysis of Sports
                 Videos",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "46:1--46:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168760",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Various innovative and original works have been
                 applied and proposed in the field of sports video
                 analysis. However, individual works have focused on
                 sophisticated methodologies with particular sport types
                 and there has been a lack of scalable and holistic
                 frameworks in this field. This article proposes a
                 solution and presents a systematic and generic approach
                 which is experimented on a relatively large-scale
                 sports consortia. The system aims at the event
                 detection scenario of an input video with an orderly
                 sequential process. Initially, domain
                 knowledge-independent local descriptors are extracted
                 homogeneously from the input video sequence. Then the
                 video representation is created by adopting a
                 bag-of-visual-words (BoW) model. The video's genre is
                 first identified by applying the k-nearest neighbor
                 (k-NN) classifiers on the initially obtained video
                 representation, and various dissimilarity measures are
                 assessed and evaluated analytically. Subsequently, an
                 unsupervised probabilistic latent semantic analysis
                 (PLSA)-based approach is employed at the same
                 histogram-based video representation, characterizing
                 each frame of video sequence into one of four view
                 groups, namely closed-up-view, mid-view, long-view, and
                 outer-field-view. Finally, a hidden conditional random
                 field (HCRF) structured prediction model is utilized
                 for interesting event detection. From experimental
                 results, k-NN classifier using KL-divergence
                 measurement demonstrates the best accuracy at 82.16\%
                 for genre categorization. Supervised SVM and
                 unsupervised PLSA have average classification
                 accuracies at 82.86\% and 68.13\%, respectively. The
                 HCRF model achieves 92.31\% accuracy using the
                 unsupervised PLSA based label input, which is
                 comparable with the supervised SVM based input at an
                 accuracy of 93.08\%. In general, such a systematic
                 approach can be widely applied in processing massive
                 videos generically.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "46",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Leung:2012:ISM,
  author =       "Clement H. C. Leung and Alice W. S. Chan and Alfredo
                 Milani and Jiming Liu and Yuanxi Li",
  title =        "Intelligent Social Media Indexing and Sharing Using an
                 Adaptive Indexing Search Engine",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "47:1--47:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168761",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Effective sharing of diverse social media is often
                 inhibited by limitations in their search and discovery
                 mechanisms, which are particularly restrictive for
                 media that do not lend themselves to automatic
                 processing or indexing. Here, we present the structure
                 and mechanism of an adaptive search engine which is
                 designed to overcome such limitations. The basic
                 framework of the adaptive search engine is to capture
                 human judgment in the course of normal usage from user
                 queries in order to develop semantic indexes which link
                 search terms to media objects semantics. This approach
                 is particularly effective for the retrieval of
                 multimedia objects, such as images, sounds, and videos,
                 where a direct analysis of the object features does not
                 allow them to be linked to search terms, for example,
                 nontextual/icon-based search, deep semantic search, or
                 when search terms are unknown at the time the media
                 repository is built. An adaptive search architecture is
                 presented to enable the index to evolve with respect to
                 user feedback, while a randomized query-processing
                 technique guarantees avoiding local minima and allows
                 the meaningful indexing of new media objects and new
                 terms. The present adaptive search engine allows for
                 the efficient community creation and updating of social
                 media indexes, which is able to instill and propagate
                 deep knowledge into social media concerning the
                 advanced search and usage of media resources.
                 Experiments with various relevance distribution
                 settings have shown efficient convergence of such
                 indexes, which enable intelligent search and sharing of
                 social media resources that are otherwise hard to
                 discover.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "47",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Chien:2012:ISS,
  author =       "Steve Chien and Amedeo Cesta",
  title =        "Introduction to the Special Section on Artificial
                 Intelligence in Space",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "48:1--48:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168762",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "48",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Wagstaff:2012:DLS,
  author =       "Kiri L. Wagstaff and Julian Panetta and Adnan Ansar
                 and Ronald Greeley and Mary Pendleton Hoffer and
                 Melissa Bunte and Norbert Sch{\"o}rghofer",
  title =        "Dynamic Landmarking for Surface Feature Identification
                 and Change Detection",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "49:1--49:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168763",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Given the large volume of images being sent back from
                 remote spacecraft, there is a need for automated
                 analysis techniques that can quickly identify
                 interesting features in those images. Feature
                 identification in individual images and automated
                 change detection in multiple images of the same target
                 are valuable for scientific studies and can inform
                 subsequent target selection. We introduce a new
                 approach to orbital image analysis called dynamic
                 landmarking. It focuses on the identification and
                 comparison of visually salient features in images. We
                 have evaluated this approach on images collected by
                 five Mars orbiters. These evaluations were motivated by
                 three scientific goals: to study fresh impact craters,
                 dust devil tracks, and dark slope streaks on Mars. In
                 the process we also detected a different kind of
                 surface change that may indicate seasonally exposed
                 bedforms. These experiences also point the way to how
                 this approach could be used in an onboard setting to
                 analyze and prioritize data as it is collected.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "49",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Estlin:2012:AAS,
  author =       "Tara A. Estlin and Benjamin J. Bornstein and Daniel M.
                 Gaines and Robert C. Anderson and David R. Thompson and
                 Michael Burl and Rebecca Casta{\~n}o and Michele Judd",
  title =        "{AEGIS} Automated Science Targeting for the {MER
                 Opportunity Rover}",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "50:1--50:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168764",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The Autonomous Exploration for Gathering Increased
                 Science (AEGIS) system enables automated data
                 collection by planetary rovers. AEGIS software was
                 uploaded to the Mars Exploration Rover (MER) mission's
                 Opportunity rover in December 2009 and has successfully
                 demonstrated automated onboard targeting based on
                 scientist-specified objectives. Prior to AEGIS, images
                 were transmitted from the rover to the operations team
                 on Earth; scientists manually analyzed the images,
                 selected geological targets for the rover's
                 remote-sensing instruments, and then generated a
                 command sequence to execute the new measurements. AEGIS
                 represents a significant paradigm shift---by using
                 onboard data analysis techniques, the AEGIS software
                 uses scientist input to select high-quality science
                 targets with no human in the loop. This approach allows
                 the rover to autonomously select and sequence targeted
                 observations in an opportunistic fashion, which is
                 particularly applicable for narrow field-of-view
                 instruments (such as the MER Mini-TES spectrometer, the
                 MER Panoramic camera, and the 2011 Mars Science
                 Laboratory (MSL) ChemCam spectrometer). This article
                 provides an overview of the AEGIS automated targeting
                 capability and describes how it is currently being used
                 onboard the MER mission Opportunity rover.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "50",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Hayden:2012:UCM,
  author =       "David S. Hayden and Steve Chien and David R. Thompson
                 and Rebecca Casta{\~n}o",
  title =        "Using Clustering and Metric Learning to Improve
                 Science Return of Remote Sensed Imagery",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "51:1--51:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168765",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Current and proposed remote space missions, such as
                 the proposed aerial exploration of Titan by an aerobot,
                 often can collect more data than can be communicated
                 back to Earth. Autonomous selective downlink algorithms
                 can choose informative subsets of data to improve the
                 science value of these bandwidth-limited transmissions.
                 This requires statistical descriptors of the data that
                 reflect very abstract and subtle distinctions in
                 science content. We propose a metric learning strategy
                 that teaches algorithms how best to cluster new data
                 based on training examples supplied by domain
                 scientists. We demonstrate that clustering informed by
                 metric learning produces results that more closely
                 match multiple scientists' labelings of aerial data
                 than do clusterings based on random or periodic
                 sampling. A new metric-learning strategy accommodates
                 training sets produced by multiple scientists with
                 different and potentially inconsistent mission
                 objectives. Our methods are fit for current spacecraft
                 processors (e.g., RAD750) and would further benefit
                 from more advanced spacecraft processor architectures,
                 such as OPERA.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "51",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Hoi:2012:ISS,
  author =       "Steven C. H. Hoi and Rong Jin and Jinhui Tang and
                 Zhi-Hua Zhou",
  title =        "Introduction to the Special Section on Distance Metric
                 Learning in Intelligent Systems",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "52:1--52:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168766",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "52",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhai:2012:MML,
  author =       "Deming Zhai and Hong Chang and Shiguang Shan and Xilin
                 Chen and Wen Gao",
  title =        "Multiview Metric Learning with Global Consistency and
                 Local Smoothness",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "53:1--53:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168767",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In many real-world applications, the same object may
                 have different observations (or descriptions) from
                 multiview observation spaces, which are highly related
                 but sometimes look different from each other.
                 Conventional metric-learning methods achieve
                 satisfactory performance on distance metric computation
                 of data in a single-view observation space, but fail to
                 handle well data sampled from multiview observation
                 spaces, especially those with highly nonlinear
                 structure. To tackle this problem, we propose a new
                 method called Multiview Metric Learning with Global
                 consistency and Local smoothness (MVML-GL) under a
                 semisupervised learning setting, which jointly
                 considers global consistency and local smoothness. The
                 basic idea is to reveal the shared latent feature space
                 of the multiview observations by embodying global
                 consistency constraints and preserving local geometric
                 structures. Specifically, this framework is composed of
                 two main steps. In the first step, we seek a global
                 consistent shared latent feature space, which not only
                 preserves the local geometric structure in each space
                 but also makes those labeled corresponding instances as
                 close as possible. In the second step, the explicit
                 mapping functions between the input spaces and the
                 shared latent space are learned via regularized locally
                 linear regression. Furthermore, these two steps both
                 can be solved by convex optimizations in closed form.
                 Experimental results with application to manifold
                 alignment on real-world datasets of pose and facial
                 expression demonstrate the effectiveness of the
                 proposed method.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "53",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2012:TML,
  author =       "Yu Zhang and Dit-Yan Yeung",
  title =        "Transfer Metric Learning with Semi-Supervised
                 Extension",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "54:1--54:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168768",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Distance metric learning plays a very crucial role in
                 many data mining algorithms because the performance of
                 an algorithm relies heavily on choosing a good metric.
                 However, the labeled data available in many
                 applications is scarce, and hence the metrics learned
                 are often unsatisfactory. In this article, we consider
                 a transfer-learning setting in which some related
                 source tasks with labeled data are available to help
                 the learning of the target task. We first propose a
                 convex formulation for multitask metric learning by
                 modeling the task relationships in the form of a task
                 covariance matrix. Then we regard transfer learning as
                 a special case of multitask learning and adapt the
                 formulation of multitask metric learning to the
                 transfer-learning setting for our method, called
                 transfer metric learning (TML). In TML, we learn the
                 metric and the task covariances between the source
                 tasks and the target task under a unified convex
                 formulation. To solve the convex optimization problem,
                 we use an alternating method in which each subproblem
                 has an efficient solution. Moreover, in many
                 applications, some unlabeled data is also available in
                 the target task, and so we propose a semi-supervised
                 extension of TML called STML to further improve the
                 generalization performance by exploiting the unlabeled
                 data based on the manifold assumption. Experimental
                 results on some commonly used transfer-learning
                 applications demonstrate the effectiveness of our
                 method.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "54",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Xu:2012:MLE,
  author =       "Jun-Ming Xu and Xiaojin Zhu and Timothy T. Rogers",
  title =        "Metric Learning for Estimating Psychological
                 Similarities",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "55:1--55:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168769",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "An important problem in cognitive psychology is to
                 quantify the perceived similarities between stimuli.
                 Previous work attempted to address this problem with
                 multidimensional scaling (MDS) and its variants.
                 However, there are several shortcomings of the MDS
                 approaches. We propose Yada, a novel general
                 metric-learning procedure based on two-alternative
                 forced-choice behavioral experiments. Our method learns
                 forward and backward nonlinear mappings between an
                 objective space in which the stimuli are defined by the
                 standard feature vector representation and a subjective
                 space in which the distance between a pair of stimuli
                 corresponds to their perceived similarity. We conduct
                 experiments on both synthetic and real human behavioral
                 datasets to assess the effectiveness of Yada. The
                 results show that Yada outperforms several standard
                 embedding and metric-learning algorithms, both in terms
                 of likelihood and recovery error.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "55",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zheng:2012:MTP,
  author =       "Yan-Tao Zheng and Zheng-Jun Zha and Tat-Seng Chua",
  title =        "Mining Travel Patterns from Geotagged Photos",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "56:1--56:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168770",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Recently, the phenomenal advent of photo-sharing
                 services, such as Flickr and Panoramio, have led to
                 voluminous community-contributed photos with text tags,
                 timestamps, and geographic references on the Internet.
                 The photos, together with their time- and
                 geo-references, become the digital footprints of photo
                 takers and implicitly document their spatiotemporal
                 movements. This study aims to leverage the wealth of
                 these enriched online photos to analyze people's travel
                 patterns at the local level of a tour destination.
                 Specifically, we focus our analysis on two aspects: (1)
                 tourist movement patterns in relation to the regions of
                 attractions (RoA), and (2) topological characteristics
                 of travel routes by different tourists. To do so, we
                 first build a statistically reliable database of travel
                 paths from a noisy pool of community-contributed
                 geotagged photos on the Internet. We then investigate
                 the tourist traffic flow among different RoAs by
                 exploiting the Markov chain model. Finally, the
                 topological characteristics of travel routes are
                 analyzed by performing a sequence clustering on tour
                 routes. Testings on four major cities demonstrate
                 promising results of the proposed system.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "56",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Rendle:2012:FML,
  author =       "Steffen Rendle",
  title =        "Factorization Machines with {libFM}",
  journal =      j-TIST,
  volume =       "3",
  number =       "3",
  pages =        "57:1--57:??",
  month =        may,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2168752.2168771",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:23 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Factorization approaches provide high accuracy in
                 several important prediction problems, for example,
                 recommender systems. However, applying factorization
                 approaches to a new prediction problem is a nontrivial
                 task and requires a lot of expert knowledge. Typically,
                 a new model is developed, a learning algorithm is
                 derived, and the approach has to be implemented.
                 Factorization machines (FM) are a generic approach
                 since they can mimic most factorization models just by
                 feature engineering. This way, factorization machines
                 combine the generality of feature engineering with the
                 superiority of factorization models in estimating
                 interactions between categorical variables of large
                 domain. libFM is a software implementation for
                 factorization machines that features stochastic
                 gradient descent (SGD) and alternating least-squares
                 (ALS) optimization, as well as Bayesian inference using
                 Markov Chain Monte Carlo (MCMC). This article
                 summarizes the recent research on factorization
                 machines both in terms of modeling and learning,
                 provides extensions for the ALS and MCMC algorithms,
                 and describes the software tool libFM.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "57",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Gabrilovich:2012:ISS,
  author =       "Evgeniy Gabrilovich and Zhong Su and Jie Tang",
  title =        "Introduction to the {Special Section on Computational
                 Models of Collective Intelligence in the Social Web}",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "58:1--58:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337543",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "58",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Herdagdelen:2012:BGP,
  author =       "Ama{\c{c}} Herdagdelen and Marco Baroni",
  title =        "Bootstrapping a Game with a Purpose for Commonsense
                 Collection",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "59:1--59:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337544",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Text mining has been very successful in extracting
                 huge amounts of commonsense knowledge from data, but
                 the extracted knowledge tends to be extremely noisy.
                 Manual construction of knowledge repositories, on the
                 other hand, tends to produce high-quality data in very
                 small amounts. We propose an architecture to combine
                 the best of both worlds: A game with a purpose that
                 induces humans to clean up data automatically extracted
                 by text mining. First, a text miner trained on a set of
                 known commonsense facts harvests many more candidate
                 facts from corpora. Then, a simple
                 slot-machine-with-a-purpose game presents these
                 candidate facts to the players for verification by
                 playing. As a result, a new dataset of high precision
                 commonsense knowledge is created. This combined
                 architecture is able to produce significantly better
                 commonsense facts than the state-of-the-art text miner
                 alone. Furthermore, we report that bootstrapping (i.e.,
                 training the text miner on the output of the game)
                 improves the subsequent performance of the text
                 miner.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "59",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Carmel:2012:FBT,
  author =       "David Carmel and Erel Uziel and Ido Guy and Yosi Mass
                 and Haggai Roitman",
  title =        "Folksonomy-Based Term Extraction for Word Cloud
                 Generation",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "60:1--60:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337545",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In this work we study the task of term extraction for
                 word cloud generation in sparsely tagged domains, in
                 which manual tags are scarce. We present a
                 folksonomy-based term extraction method, called
                 tag-boost, which boosts terms that are frequently used
                 by the public to tag content. Our experiments with
                 tag-boost based term extraction over different domains
                 demonstrate tremendous improvement in word cloud
                 quality, as reflected by the agreement between manual
                 tags of the testing items and the cloud's terms
                 extracted from the items' content. Moreover, our
                 results demonstrate the high robustness of this
                 approach, as compared to alternative cloud generation
                 methods that exhibit a high sensitivity to data
                 sparseness. Additionally, we show that tag-boost can be
                 effectively applied even in nontagged domains, by using
                 an external rich folksonomy borrowed from a well-tagged
                 domain.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "60",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Wang:2012:IOS,
  author =       "Guan Wang and Sihong Xie and Bing Liu and Philip S.
                 Yu",
  title =        "Identify Online Store Review Spammers via Social
                 Review Graph",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "61:1--61:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337546",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Online shopping reviews provide valuable information
                 for customers to compare the quality of products, store
                 services, and many other aspects of future purchases.
                 However, spammers are joining this community trying to
                 mislead consumers by writing fake or unfair reviews to
                 confuse the consumers. Previous attempts have used
                 reviewers' behaviors such as text similarity and rating
                 patterns, to detect spammers. These studies are able to
                 identify certain types of spammers, for instance, those
                 who post many similar reviews about one target.
                 However, in reality, there are other kinds of spammers
                 who can manipulate their behaviors to act just like
                 normal reviewers, and thus cannot be detected by the
                 available techniques. In this article, we propose a
                 novel concept of review graph to capture the
                 relationships among all reviewers, reviews and stores
                 that the reviewers have reviewed as a heterogeneous
                 graph. We explore how interactions between nodes in
                 this graph could reveal the cause of spam and propose
                 an iterative computation model to identify suspicious
                 reviewers. In the review graph, we have three kinds of
                 nodes, namely, reviewer, review, and store. We capture
                 their relationships by introducing three fundamental
                 concepts, the trustiness of reviewers, the honesty of
                 reviews, and the reliability of stores, and identifying
                 their interrelationships: a reviewer is more
                 trustworthy if the person has written more honest
                 reviews; a store is more reliable if it has more
                 positive reviews from trustworthy reviewers; and a
                 review is more honest if many other honest reviews
                 support it. This is the first time such intricate
                 relationships have been identified for spam detection
                 and captured in a graph model. We further develop an
                 effective computation method based on the proposed
                 graph model. Different from any existing approaches, we
                 do not use any review text information. Our model is
                 thus complementary to existing approaches and able to
                 find more difficult and subtle spamming activities,
                 which are agreed upon by human judges after they
                 evaluate our results.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "61",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Lerman:2012:USM,
  author =       "Kristina Lerman and Tad Hogg",
  title =        "Using Stochastic Models to Describe and Predict Social
                 Dynamics of {Web} Users",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "62:1--62:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337547",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The popularity of content in social media is unequally
                 distributed, with some items receiving a
                 disproportionate share of attention from users.
                 Predicting which newly-submitted items will become
                 popular is critically important for both the hosts of
                 social media content and its consumers. Accurate and
                 timely prediction would enable hosts to maximize
                 revenue through differential pricing for access to
                 content or ad placement. Prediction would also give
                 consumers an important tool for filtering the content.
                 Predicting the popularity of content in social media is
                 challenging due to the complex interactions between
                 content quality and how the social media site
                 highlights its content. Moreover, most social media
                 sites selectively present content that has been highly
                 rated by similar users, whose similarity is indicated
                 implicitly by their behavior or explicitly by links in
                 a social network. While these factors make it difficult
                 to predict popularity a priori, stochastic models of
                 user behavior on these sites can allow predicting
                 popularity based on early user reactions to new
                 content. By incorporating the various mechanisms
                 through which web sites display content, such models
                 improve on predictions that are based on simply
                 extrapolating from the early votes. Specifically, for
                 one such site, the news aggregator Digg, we show how a
                 stochastic model distinguishes the effect of the
                 increased visibility due to the network from how
                 interested users are in the content. We find a wide
                 range of interest, distinguishing stories primarily of
                 interest to users in the network (``niche interests'')
                 from those of more general interest to the user
                 community. This distinction is useful for predicting a
                 story's eventual popularity from users' early reactions
                 to the story.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "62",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Yin:2012:LCT,
  author =       "Zhijun Yin and Liangliang Cao and Quanquan Gu and
                 Jiawei Han",
  title =        "Latent Community Topic Analysis: Integration of
                 Community Discovery with Topic Modeling",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "63:1--63:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337548",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "This article studies the problem of latent community
                 topic analysis in text-associated graphs. With the
                 development of social media, a lot of user-generated
                 content is available with user networks. Along with
                 rich information in networks, user graphs can be
                 extended with text information associated with nodes.
                 Topic modeling is a classic problem in text mining and
                 it is interesting to discover the latent topics in
                 text-associated graphs. Different from traditional
                 topic modeling methods considering links, we
                 incorporate community discovery into topic analysis in
                 text-associated graphs to guarantee the topical
                 coherence in the communities so that users in the same
                 community are closely linked to each other and share
                 common latent topics. We handle topic modeling and
                 community discovery in the same framework. In our model
                 we separate the concepts of community and topic, so one
                 community can correspond to multiple topics and
                 multiple communities can share the same topic. We
                 compare different methods and perform extensive
                 experiments on two real datasets. The results confirm
                 our hypothesis that topics could help understand
                 community structure, while community structure could
                 help model topics.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "63",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Sizov:2012:LGS,
  author =       "Sergej Sizov",
  title =        "Latent Geospatial Semantics of Social Media",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "64:1--64:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337549",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Multimodal understanding of shared content is an
                 important success factor for many Web 2.0 applications
                 and platforms. This article addresses the fundamental
                 question of geo-spatial awareness in social media
                 applications. In this context, we introduce an approach
                 for improved characterization of social media by
                 combining text features (e.g., tags as a prominent
                 example of short, unstructured text labels) with
                 spatial knowledge (e.g., geotags, coordinates of
                 images, and videos). Our model-based framework GeoFolk
                 combines these two aspects in order to construct better
                 algorithms for content management, retrieval, and
                 sharing. We demonstrate in systematic studies the
                 benefits of this combination for a broad spectrum of
                 scenarios related to social media: recommender systems,
                 automatic content organization and filtering, and event
                 detection. Furthermore, we establish a simple and
                 technically sound model that can be seen as a reference
                 baseline for future research in the field of geotagged
                 social media.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "64",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Cortizo:2012:ISS,
  author =       "Jos{\'e} Carlos Cortizo and Francisco Carrero and
                 Iv{\'a}n Cantador and Jos{\'e} Antonio Troyano and
                 Paolo Rosso",
  title =        "Introduction to the {Special Section on Search and
                 Mining User-Generated Content}",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "65:1--65:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337550",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The primary goal of this special section of ACM
                 Transactions on Intelligent Systems and Technology is
                 to foster research in the interplay between Social
                 Media, Data/Opinion Mining and Search, aiming to
                 reflect the actual developments in technologies that
                 exploit user-generated content.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "65",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Paltoglou:2012:TMD,
  author =       "Georgios Paltoglou and Mike Thelwall",
  title =        "{Twitter}, {MySpace}, {Digg}: Unsupervised Sentiment
                 Analysis in Social Media",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "66:1--66:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337551",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Sentiment analysis is a growing area of research with
                 significant applications in both industry and academia.
                 Most of the proposed solutions are centered around
                 supervised, machine learning approaches and
                 review-oriented datasets. In this article, we focus on
                 the more common informal textual communication on the
                 Web, such as online discussions, tweets and social
                 network comments and propose an intuitive, less
                 domain-specific, unsupervised, lexicon-based approach
                 that estimates the level of emotional intensity
                 contained in text in order to make a prediction. Our
                 approach can be applied to, and is tested in, two
                 different but complementary contexts: subjectivity
                 detection and polarity classification. Extensive
                 experiments were carried on three real-world datasets,
                 extracted from online social Web sites and annotated by
                 human evaluators, against state-of-the-art supervised
                 approaches. The results demonstrate that the proposed
                 algorithm, even though unsupervised, outperforms
                 machine learning solutions in the majority of cases,
                 overall presenting a very robust and reliable solution
                 for sentiment analysis of informal communication on the
                 Web.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "66",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Trivedi:2012:LSB,
  author =       "Anusua Trivedi and Piyush Rai and Hal {Daum{\'e} III}
                 and Scott L. Duvall",
  title =        "Leveraging Social Bookmarks from Partially Tagged
                 Corpus for Improved {Web} Page Clustering",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "67:1--67:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337552",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Automatic clustering of Web pages helps a number of
                 information retrieval tasks, such as improving user
                 interfaces, collection clustering, introducing
                 diversity in search results, etc. Typically, Web page
                 clustering algorithms use only features extracted from
                 the page-text. However, the advent of
                 social-bookmarking Web sites, such as StumbleUpon.com
                 and Delicious.com, has led to a huge amount of
                 user-generated content such as the social tag
                 information that is associated with the Web pages. In
                 this article, we present a subspace based feature
                 extraction approach that leverages the social tag
                 information to complement the page-contents of a Web
                 page for extracting better features, with the goal of
                 improved clustering performance. In our approach, we
                 consider page-text and tags as two separate views of
                 the data, and learn a shared subspace that maximizes
                 the correlation between the two views. Any clustering
                 algorithm can then be applied in this subspace. We then
                 present an extension that allows our approach to be
                 applicable even if the Web page corpus is only
                 partially tagged, that is, when the social tags are
                 present for not all, but only for a small number of Web
                 pages. We compare our subspace based approach with a
                 number of baselines that use tag information in various
                 other ways, and show that the subspace based approach
                 leads to improved performance on the Web page
                 clustering task. We also discuss some possible future
                 work including an active learning extension that can
                 help in choosing which Web pages to get tags for, if we
                 only can get the social tags for only a small number of
                 Web pages.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "67",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Potthast:2012:IRC,
  author =       "Martin Potthast and Benno Stein and Fabian Loose and
                 Steffen Becker",
  title =        "Information Retrieval in the {Commentsphere}",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "68:1--68:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337553",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "This article studies information retrieval tasks
                 related to Web comments. Prerequisite of such a study
                 and a main contribution of the article is a unifying
                 survey of the research field. We identify the most
                 important retrieval tasks related to comments, namely
                 filtering, ranking, and summarization. Within these
                 tasks, we distinguish two paradigms according to which
                 comments are utilized and which we designate as
                 comment-targeting and comment-exploiting. Within the
                 first paradigm, the comments themselves form the
                 retrieval targets. Within the second paradigm, the
                 commented items form the retrieval targets (i.e.,
                 comments are used as an additional information source
                 to improve the retrieval performance for the commented
                 items). We report on four case studies to demonstrate
                 the exploration of the commentsphere under information
                 retrieval aspects: comment filtering, comment ranking,
                 comment summarization and cross-media retrieval. The
                 first three studies deal primarily with
                 comment-targeting retrieval, while the last one deals
                 with comment-exploiting retrieval. Throughout the
                 article, connections to information retrieval research
                 are pointed out.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "68",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Carmel:2012:RBN,
  author =       "David Carmel and Haggai Roitman and Elad Yom-Tov",
  title =        "On the Relationship between Novelty and Popularity of
                 User-Generated Content",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "69:1--69:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337554",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "This work deals with the task of predicting the
                 popularity of user-generated content. We demonstrate
                 how the novelty of newly published content plays an
                 important role in affecting its popularity. More
                 specifically, we study three dimensions of novelty. The
                 first one, termed contemporaneous novelty, models the
                 relative novelty embedded in a new post with respect to
                 contemporary content that was generated by others. The
                 second type of novelty, termed self novelty, models the
                 relative novelty with respect to the user's own
                 contribution history. The third type of novelty, termed
                 discussion novelty, relates to the novelty of the
                 comments associated by readers with respect to the post
                 content. We demonstrate the contribution of the new
                 novelty measures to estimating blog-post popularity by
                 predicting the number of comments expected for a fresh
                 post. We further demonstrate how novelty based measures
                 can be utilized for predicting the citation volume of
                 academic papers.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "69",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Li:2012:ERQ,
  author =       "Xiaonan Li and Chengkai Li and Cong Yu",
  title =        "Entity-Relationship Queries over {Wikipedia}",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "70:1--70:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337555",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Wikipedia is the largest user-generated knowledge
                 base. We propose a structured query mechanism,
                 entity-relationship query, for searching entities in
                 the Wikipedia corpus by their properties and
                 interrelationships. An entity-relationship query
                 consists of multiple predicates on desired entities.
                 The semantics of each predicate is specified with
                 keywords. Entity-relationship query searches entities
                 directly over text instead of preextracted structured
                 data stores. This characteristic brings two benefits:
                 (1) Query semantics can be intuitively expressed by
                 keywords; (2) It only requires rudimentary entity
                 annotation, which is simpler than explicitly extracting
                 and reasoning about complex semantic information before
                 query-time. We present a ranking framework for general
                 entity-relationship queries and a position-based
                 Bounded Cumulative Model (BCM) for accurate ranking of
                 query answers. We also explore various weighting
                 schemes for further improving the accuracy of BCM. We
                 test our ideas on a 2008 version of Wikipedia using a
                 collection of 45 queries pooled from INEX entity
                 ranking track and our own crafted queries. Experiments
                 show that the ranking and weighting schemes are both
                 effective, particularly on multipredicate queries.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "70",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Wang:2012:EFW,
  author =       "Haofen Wang and Linyun Fu and Wei Jin and Yong Yu",
  title =        "{EachWiki}: Facilitating Wiki Authoring by Annotation
                 Suggestion",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "71:1--71:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337556",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Wikipedia, one of the best-known wikis and the world's
                 largest free online encyclopedia, has embraced the
                 power of collaborative editing to harness collective
                 intelligence. However, using such a wiki to create
                 high-quality articles is not as easy as people imagine,
                 given for instance the difficulty of reusing knowledge
                 already available in Wikipedia. As a result, the heavy
                 burden of upbuilding and maintaining the ever-growing
                 online encyclopedia still rests on a small group of
                 people. In this article, we aim at facilitating wiki
                 authoring by providing annotation recommendations, thus
                 lightening the burden of both contributors and
                 administrators. We leverage the collective wisdom of
                 the users by exploiting Semantic Web technologies with
                 Wikipedia data and adopt a unified algorithm to support
                 link, category, and semantic relation recommendation. A
                 prototype system named EachWiki is proposed and
                 evaluated. The experimental results show that it has
                 achieved considerable improvements in terms of
                 effectiveness, efficiency and usability. The proposed
                 approach can also be applied to other wiki-based
                 collaborative editing systems.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "71",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Lampos:2012:NES,
  author =       "Vasileios Lampos and Nello Cristianini",
  title =        "Nowcasting Events from the Social {Web} with
                 Statistical Learning",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "72:1--72:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337557",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "We present a general methodology for inferring the
                 occurrence and magnitude of an event or phenomenon by
                 exploring the rich amount of unstructured textual
                 information on the social part of the Web. Having
                 geo-tagged user posts on the microblogging service of
                 Twitter as our input data, we investigate two case
                 studies. The first consists of a benchmark problem,
                 where actual levels of rainfall in a given location and
                 time are inferred from the content of tweets. The
                 second one is a real-life task, where we infer regional
                 Influenza-like Illness rates in the effort of detecting
                 timely an emerging epidemic disease. Our analysis
                 builds on a statistical learning framework, which
                 performs sparse learning via the bootstrapped version
                 of LASSO to select a consistent subset of textual
                 features from a large amount of candidates. In both
                 case studies, selected features indicate close semantic
                 correlation with the target topics and inference,
                 conducted by regression, has a significant performance,
                 especially given the short length---approximately one
                 year---of Twitter's data time series.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "72",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Tang:2012:RUI,
  author =       "Xuning Tang and Christopher C. Yang",
  title =        "Ranking User Influence in Healthcare Social Media",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "73:1--73:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337558",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Due to the revolutionary development of Web 2.0
                 technology, individual users have become major
                 contributors of Web content in online social media. In
                 light of the growing activities, how to measure a
                 user's influence to other users in online social media
                 becomes increasingly important. This research need is
                 urgent especially in the online healthcare community
                 since positive influence can be beneficial while
                 negative influence may cause negative impact on other
                 users of the same community. In this article, a
                 research framework was proposed to study user influence
                 within the online healthcare community. We proposed a
                 new approach to incorporate users' reply relationship,
                 conversation content and response immediacy which
                 capture both explicit and implicit interaction between
                 users to identify influential users of online
                 healthcare community. A weighted social network is
                 developed to represent the influence between users. We
                 tested our proposed techniques thoroughly on two
                 medical support forums. Two algorithms UserRank and
                 Weighted in-degree are benchmarked with PageRank and
                 in-degree. Experiment results demonstrated the validity
                 and effectiveness of our proposed approaches.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "73",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Strohmaier:2012:EFI,
  author =       "Markus Strohmaier and Denis Helic and Dominik Benz and
                 Christian K{\"o}rner and Roman Kern",
  title =        "Evaluation of Folksonomy Induction Algorithms",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "74:1--74:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337559",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Algorithms for constructing hierarchical structures
                 from user-generated metadata have caught the interest
                 of the academic community in recent years. In social
                 tagging systems, the output of these algorithms is
                 usually referred to as folksonomies (from
                 folk-generated taxonomies). Evaluation of folksonomies
                 and folksonomy induction algorithms is a challenging
                 issue complicated by the lack of golden standards, lack
                 of comprehensive methods and tools as well as a lack of
                 research and empirical/simulation studies applying
                 these methods. In this article, we report results from
                 a broad comparative study of state-of-the-art
                 folksonomy induction algorithms that we have applied
                 and evaluated in the context of five social tagging
                 systems. In addition to adopting semantic evaluation
                 techniques, we present and adopt a new technique that
                 can be used to evaluate the usefulness of folksonomies
                 for navigation. Our work sheds new light on the
                 properties and characteristics of state-of-the-art
                 folksonomy induction algorithms and introduces a new
                 pragmatic approach to folksonomy evaluation, while at
                 the same time identifying some important limitations
                 and challenges of folksonomy evaluation. Our results
                 show that folksonomy induction algorithms specifically
                 developed to capture intuitions of social tagging
                 systems outperform traditional hierarchical clustering
                 techniques. To the best of our knowledge, this work
                 represents the largest and most comprehensive
                 evaluation study of state-of-the-art folksonomy
                 induction algorithms to date.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "74",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2012:EAL,
  author =       "Xiaoqin Shelley Zhang and Bhavesh Shrestha and
                 Sungwook Yoon and Subbarao Kambhampati and Phillip
                 DiBona and Jinhong K. Guo and Daniel McFarlane and
                 Martin O. Hofmann and Kenneth Whitebread and Darren
                 Scott Appling and Elizabeth T. Whitaker and Ethan B.
                 Trewhitt and Li Ding and James R. Michaelis and Deborah
                 L. McGuinness and James A. Hendler and Janardhan Rao
                 Doppa and Charles Parker and Thomas G. Dietterich and
                 Prasad Tadepalli and Weng-Keen Wong and Derek Green and
                 Anton Rebguns and Diana Spears and Ugur Kuter and Geoff
                 Levine and Gerald DeJong and Reid L. MacTavish and
                 Santiago Onta{\~n}{\'o}n and Jainarayan Radhakrishnan
                 and Ashwin Ram and Hala Mostafa and Huzaifa Zafar and
                 Chongjie Zhang and Daniel Corkill and Victor Lesser and
                 Zhexuan Song",
  title =        "An Ensemble Architecture for Learning Complex
                 Problem-Solving Techniques from Demonstration",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "75:1--75:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337560",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "We present a novel ensemble architecture for learning
                 problem-solving techniques from a very small number of
                 expert solutions and demonstrate its effectiveness in a
                 complex real-world domain. The key feature of our
                 ``Generalized Integrated Learning Architecture'' (GILA)
                 is a set of heterogeneous independent learning and
                 reasoning (ILR) components, coordinated by a central
                 meta-reasoning executive (MRE). The ILRs are weakly
                 coupled in the sense that all coordination during
                 learning and performance happens through the MRE. Each
                 ILR learns independently from a small number of expert
                 demonstrations of a complex task. During performance,
                 each ILR proposes partial solutions to subproblems
                 posed by the MRE, which are then selected from and
                 pieced together by the MRE to produce a complete
                 solution. The heterogeneity of the learner-reasoners
                 allows both learning and problem solving to be more
                 effective because their abilities and biases are
                 complementary and synergistic. We describe the
                 application of this novel learning and problem solving
                 architecture to the domain of airspace management,
                 where multiple requests for the use of airspaces need
                 to be deconflicted, reconciled, and managed
                 automatically. Formal evaluations show that our system
                 performs as well as or better than humans after
                 learning from the same training data. Furthermore, GILA
                 outperforms any individual ILR run in isolation, thus
                 demonstrating the power of the ensemble architecture
                 for learning and problem solving.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "75",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Wang:2012:LCR,
  author =       "Zhenxing Wang and Laiwan Chan",
  title =        "Learning Causal Relations in Multivariate Time Series
                 Data",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "76:1--76:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337561",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Many applications naturally involve time series data
                 and the vector autoregression (VAR), and the structural
                 VAR (SVAR) are dominant tools to investigate relations
                 between variables in time series. In the first part of
                 this work, we show that the SVAR method is incapable of
                 identifying contemporaneous causal relations for
                 Gaussian process. In addition, least squares estimators
                 become unreliable when the scales of the problems are
                 large and observations are limited. In the remaining
                 part, we propose an approach to apply Bayesian network
                 learning algorithms to identify SVARs from time series
                 data in order to capture both temporal and
                 contemporaneous causal relations, and avoid high-order
                 statistical tests. The difficulty of applying Bayesian
                 network learning algorithms to time series is that the
                 sizes of the networks corresponding to time series tend
                 to be large, and high-order statistical tests are
                 required by Bayesian network learning algorithms in
                 this case. To overcome the difficulty, we show that the
                 search space of conditioning sets d-separating two
                 vertices should be a subset of the Markov blankets.
                 Based on this fact, we propose an algorithm enabling us
                 to learn Bayesian networks locally, and make the
                 largest order of statistical tests independent of the
                 scales of the problems. Empirical results show that our
                 algorithm outperforms existing methods in terms of both
                 efficiency and accuracy.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "76",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Mandrake:2012:SSD,
  author =       "Lukas Mandrake and Umaa Rebbapragada and Kiri L.
                 Wagstaff and David Thompson and Steve Chien and Daniel
                 Tran and Robert T. Pappalardo and Damhnait Gleeson and
                 Rebecca Casta{\~n}o",
  title =        "Surface Sulfur Detection via Remote Sensing and
                 Onboard Classification",
  journal =      j-TIST,
  volume =       "3",
  number =       "4",
  pages =        "77:1--77:??",
  month =        sep,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2337542.2337562",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Nov 6 18:47:26 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Orbital remote sensing provides a powerful way to
                 efficiently survey targets such as the Earth and other
                 planets and moons for features of interest. One such
                 feature of astrobiological relevance is the presence of
                 surface sulfur deposits. These deposits have been
                 observed to be associated with microbial activity at
                 the Borup Fiord glacial springs in Canada, a location
                 that may provide an analogue to other icy environments
                 such as Europa. This article evaluates automated
                 classifiers for detecting sulfur in remote sensing
                 observations by the {Hyperion} spectrometer on the EO-1
                 spacecraft. We determined that a data-driven machine
                 learning solution was needed because the sulfur could
                 not be detected by simply matching observations to
                 sulfur lab spectra. We also evaluated several methods
                 (manual and automated) for identifying the most
                 relevant attributes (spectral wavelengths) needed for
                 successful sulfur detection. Our findings include (1)
                 the Borup Fiord sulfur deposits were best modeled as
                 containing two sub-populations: sulfur on ice and
                 sulfur on rock; (2) as expected, classifiers using
                 Gaussian kernels outperformed those based on linear
                 kernels, and should be adopted when onboard
                 computational constraints permit; and (3) Recursive
                 Feature Elimination selected sensible and effective
                 features for use in the computationally constrained
                 environment onboard EO-1. This study helped guide the
                 selection of algorithm parameters and configuration for
                 the classification system currently operational on
                 EO-1. Finally, we discuss implications for a similar
                 onboard classification system for a future Europa
                 orbiter.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "77",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{King:2013:ISS,
  author =       "Irwin King and Wolfgang Nejdl",
  title =        "Introduction to the special section on {Twitter} and
                 microblogging services",
  journal =      j-TIST,
  volume =       "4",
  number =       "1",
  pages =        "1:1--1:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2414425.2414426",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "1",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Cheng:2013:CDF,
  author =       "Zhiyuan Cheng and James Caverlee and Kyumin Lee",
  title =        "A content-driven framework for geolocating microblog
                 users",
  journal =      j-TIST,
  volume =       "4",
  number =       "1",
  pages =        "2:1--2:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2414425.2414427",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Highly dynamic real-time microblog systems have
                 already published petabytes of real-time human sensor
                 data in the form of status updates. However, the lack
                 of user adoption of geo-based features per user or per
                 post signals that the promise of microblog services as
                 location-based sensing systems may have only limited
                 reach and impact. Thus, in this article, we propose and
                 evaluate a probabilistic framework for estimating a
                 microblog user's location based purely on the content
                 of the user's posts. Our framework can overcome the
                 sparsity of geo-enabled features in these services and
                 bring augmented scope and breadth to emerging
                 location-based personalized information services. Three
                 of the key features of the proposed approach are: (i)
                 its reliance purely on publicly available content; (ii)
                 a classification component for automatically
                 identifying words in posts with a strong local
                 geo-scope; and (iii) a lattice-based neighborhood
                 smoothing model for refining a user's location
                 estimate. On average we find that the location
                 estimates converge quickly, placing 51\% of users
                 within 100 miles of their actual location.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "2",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Liu:2013:NER,
  author =       "Xiaohua Liu and Furu Wei and Shaodian Zhang and Ming
                 Zhou",
  title =        "Named entity recognition for tweets",
  journal =      j-TIST,
  volume =       "4",
  number =       "1",
  pages =        "3:1--3:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2414425.2414428",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Two main challenges of Named Entity Recognition (NER)
                 for tweets are the insufficient information in a tweet
                 and the lack of training data. We propose a novel
                 method consisting of three core elements: (1)
                 normalization of tweets; (2) combination of a K-Nearest
                 Neighbors (KNN) classifier with a linear Conditional
                 Random Fields (CRF) model; and (3) semisupervised
                 learning framework. The tweet normalization
                 preprocessing corrects common ill-formed words using a
                 global linear model. The KNN-based classifier conducts
                 prelabeling to collect global coarse evidence across
                 tweets while the CRF model conducts sequential labeling
                 to capture fine-grained information encoded in a tweet.
                 The semisupervised learning plus the gazetteers
                 alleviate the lack of training data. Extensive
                 experiments show the advantages of our method over the
                 baselines as well as the effectiveness of
                 normalization, KNN, and semisupervised learning.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "3",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Chang:2013:IRR,
  author =       "Yi Chang and Anlei Dong and Pranam Kolari and Ruiqiang
                 Zhang and Yoshiyuki Inagaki and Fernando Diaz and
                 Hongyuan Zha and Yan Liu",
  title =        "Improving recency ranking using {Twitter} data",
  journal =      j-TIST,
  volume =       "4",
  number =       "1",
  pages =        "4:1--4:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2414425.2414429",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In Web search and vertical search, recency ranking
                 refers to retrieving and ranking documents by both
                 relevance and freshness. As impoverished in-links and
                 click information is the biggest challenge for recency
                 ranking, we advocate the use of Twitter data to address
                 the challenge in this article. We propose a method to
                 utilize Twitter TinyURL to detect fresh and
                 high-quality documents, and leverage Twitter data to
                 generate novel and effective features for ranking. The
                 empirical experiments demonstrate that the proposed
                 approach effectively improves a commercial search
                 engine for both Web search ranking and tweet vertical
                 ranking.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "4",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Han:2013:LNS,
  author =       "Bo Han and Paul Cook and Timothy Baldwin",
  title =        "Lexical normalization for social media text",
  journal =      j-TIST,
  volume =       "4",
  number =       "1",
  pages =        "5:1--5:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2414425.2414430",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Twitter provides access to large volumes of data in
                 real time, but is notoriously noisy, hampering its
                 utility for NLP. In this article, we target
                 out-of-vocabulary words in short text messages and
                 propose a method for identifying and normalizing
                 lexical variants. Our method uses a classifier to
                 detect lexical variants, and generates correction
                 candidates based on morphophonemic similarity. Both
                 word similarity and context are then exploited to
                 select the most probable correction candidate for the
                 word. The proposed method doesn't require any
                 annotations, and achieves state-of-the-art performance
                 over an SMS corpus and a novel dataset based on
                 Twitter.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "5",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Shen:2013:RUT,
  author =       "Keyi Shen and Jianmin Wu and Ya Zhang and Yiping Han
                 and Xiaokang Yang and Li Song and Xiao Gu",
  title =        "Reorder user's tweets",
  journal =      j-TIST,
  volume =       "4",
  number =       "1",
  pages =        "6:1--6:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2414425.2414431",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Twitter displays the tweets a user received in a
                 reversed chronological order, which is not always the
                 best choice. As Twitter is full of messages of very
                 different qualities, many informative or relevant
                 tweets might be flooded or displayed at the bottom
                 while some nonsense buzzes might be ranked higher. In
                 this work, we present a supervised learning method for
                 personalized tweets reordering based on user interests.
                 User activities on Twitter, in terms of tweeting,
                 retweeting, and replying, are leveraged to obtain the
                 training data for reordering models. Through exploring
                 a rich set of social and personalized features, we
                 model the relevance of tweets by minimizing the
                 pairwise loss of relevant and irrelevant tweets. The
                 tweets are then reordered according to the predicted
                 relevance scores. Experimental results with real
                 Twitter user activities demonstrated the effectiveness
                 of our method. The new method achieved above 30\%
                 accuracy gain compared with the default ordering in
                 Twitter based on time.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "6",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Guy:2013:ISS,
  author =       "Ido Guy and Li Chen and Michelle X. Zhou",
  title =        "Introduction to the special section on social
                 recommender systems",
  journal =      j-TIST,
  volume =       "4",
  number =       "1",
  pages =        "7:1--7:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2414425.2414432",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "7",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Quijano-Sanchez:2013:SFG,
  author =       "Lara Quijano-Sanchez and Juan A. Recio-Garcia and
                 Belen Diaz-Agudo and Guillermo Jimenez-Diaz",
  title =        "Social factors in group recommender systems",
  journal =      j-TIST,
  volume =       "4",
  number =       "1",
  pages =        "8:1--8:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2414425.2414433",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In this article we review the existing techniques in
                 group recommender systems and we propose some
                 improvement based on the study of the different
                 individual behaviors when carrying out a
                 decision-making process. Our method includes an
                 analysis of group personality composition and trust
                 between each group member to improve the accuracy of
                 group recommenders. This way we simulate the
                 argumentation process followed by groups of people when
                 agreeing on a common activity in a more realistic way.
                 Moreover, we reflect how they expect the system to
                 behave in a long term recommendation process. This is
                 achieved by including a memory of past recommendations
                 that increases the satisfaction of users whose
                 preferences have not been taken into account in
                 previous recommendations.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "8",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2013:GVR,
  author =       "Weishi Zhang and Guiguang Ding and Li Chen and
                 Chunping Li and Chengbo Zhang",
  title =        "Generating virtual ratings from {Chinese} reviews to
                 augment online recommendations",
  journal =      j-TIST,
  volume =       "4",
  number =       "1",
  pages =        "9:1--9:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2414425.2414434",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Collaborative filtering (CF) recommenders based on
                 User-Item rating matrix as explicitly obtained from end
                 users have recently appeared promising in recommender
                 systems. However, User-Item rating matrix is not always
                 available or very sparse in some web applications,
                 which has critical impact to the application of CF
                 recommenders. In this article we aim to enhance the
                 online recommender system by fusing virtual ratings as
                 derived from user reviews. Specifically, taking into
                 account of Chinese reviews' characteristics, we propose
                 to fuse the self-supervised emotion-integrated
                 sentiment classification results into CF recommenders,
                 by which the User-Item Rating Matrix can be inferred by
                 decomposing item reviews that users gave to the items.
                 The main advantage of this approach is that it can
                 extend CF recommenders to some web applications without
                 user rating information. In the experiments, we have
                 first identified the self-supervised sentiment
                 classification's higher precision and recall by
                 comparing it with traditional classification methods.
                 Furthermore, the classification results, as behaving as
                 virtual ratings, were incorporated into both user-based
                 and item-based CF algorithms. We have also conducted an
                 experiment to evaluate the proximity between the
                 virtual and real ratings and clarified the
                 effectiveness of the virtual ratings. The experimental
                 results demonstrated the significant impact of virtual
                 ratings on increasing system's recommendation accuracy
                 in different data conditions (i.e., conditions with
                 real ratings and without).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "9",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Biancalana:2013:ASR,
  author =       "Claudio Biancalana and Fabio Gasparetti and Alessandro
                 Micarelli and Giuseppe Sansonetti",
  title =        "An approach to social recommendation for context-aware
                 mobile services",
  journal =      j-TIST,
  volume =       "4",
  number =       "1",
  pages =        "10:1--10:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2414425.2414435",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Nowadays, several location-based services (LBSs) allow
                 their users to take advantage of information from the
                 Web about points of interest (POIs) such as cultural
                 events or restaurants. To the best of our knowledge,
                 however, none of these provides information taking into
                 account user preferences, or other elements, in
                 addition to location, that contribute to define the
                 context of use. The provided suggestions do not
                 consider, for example, time, day of week, weather, user
                 activity or means of transport. This article describes
                 a social recommender system able to identify user
                 preferences and information needs, thus suggesting
                 personalized recommendations related to POIs in the
                 surroundings of the user's current location. The
                 proposed approach achieves the following goals: (i) to
                 supply, unlike the current LBSs, a methodology for
                 identifying user preferences and needs to be used in
                 the information filtering process; (ii) to exploit the
                 ever-growing amount of information from social
                 networking, user reviews, and local search Web sites;
                 (iii) to establish procedures for defining the context
                 of use to be employed in the recommendation of POIs
                 with low effort. The flexibility of the architecture is
                 such that our approach can be easily extended to any
                 category of POI. Experimental tests carried out on real
                 users enabled us to quantify the benefits of the
                 proposed approach in terms of performance
                 improvement.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "10",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Gedikli:2013:IRA,
  author =       "Fatih Gedikli and Dietmar Jannach",
  title =        "Improving recommendation accuracy based on
                 item-specific tag preferences",
  journal =      j-TIST,
  volume =       "4",
  number =       "1",
  pages =        "11:1--11:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2414425.2414436",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In recent years, different proposals have been made to
                 exploit Social Web tagging information to build more
                 effective recommender systems. The tagging data, for
                 example, were used to identify similar users or were
                 viewed as additional information about the
                 recommendable items. Recent research has indicated that
                 ``attaching feelings to tags'' is experienced by users
                 as a valuable means to express which features of an
                 item they particularly like or dislike. When following
                 such an approach, users would therefore not only add
                 tags to an item as in usual Web 2.0 applications, but
                  also attach a preference (affect) to the tag itself,
                 expressing, for example, whether or not they liked a
                 certain actor in a given movie. In this work, we show
                 how this additional preference data can be exploited by
                 a recommender system to make more accurate predictions.
                 In contrast to previous work, which also relied on
                 so-called tag preferences to enhance the predictive
                 accuracy of recommender systems, we argue that tag
                 preferences should be considered in the context of an
                 item. We therefore propose new schemes to infer and
                 exploit context-specific tag preferences in the
                 recommendation process. An evaluation on two different
                 datasets reveals that our approach is capable of
                 providing more accurate recommendations than previous
                 tag-based recommender algorithms and recent
                 tag-agnostic matrix factorization techniques.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "11",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Chen:2013:MRW,
  author =       "Yu-Chih Chen and Yu-Shi Lin and Yu-Chun Shen and
                 Shou-De Lin",
  title =        "A modified random walk framework for handling negative
                 ratings and generating explanations",
  journal =      j-TIST,
  volume =       "4",
  number =       "1",
  pages =        "12:1--12:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2414425.2414437",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The concept of random walk (RW) has been widely
                 applied in the design of recommendation systems.
                 RW-based approaches are effective in handling locality
                 problem and taking extra information, such as the
                 relationships between items or users, into
                 consideration. However, the traditional RW-based
                 approach has a serious limitation in handling
                 bidirectional opinions. The propagation of positive and
                 negative information simultaneously in a graph is
                 nontrivial using random walk. To address the problem,
                 this article presents a novel and efficient RW-based
                 model that can handle both positive and negative
                 comments with the guarantee of convergence.
                 Furthermore, we argue that a good recommendation system
                 should provide users not only a list of recommended
                 items but also reasonable explanations for the
                 decisions. Therefore, we propose a technique that
                 generates explanations by backtracking the influential
                 paths and subgraphs. The results of experiments on the
                 MovieLens and Netflix datasets show that our model
                 significantly outperforms state-of-the-art RW-based
                 algorithms, and is capable of improving the overall
                 performance in the ensemble with other models.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "12",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Said:2013:MRC,
  author =       "Alan Said and Shlomo Berkovsky and Ernesto W. {De
                 Luca}",
  title =        "Movie recommendation in context",
  journal =      j-TIST,
  volume =       "4",
  number =       "1",
  pages =        "13:1--13:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2414425.2414438",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The challenge and workshop on Context-Aware Movie
                 Recommendation (CAMRa2010) were conducted jointly in
                 2010 with the Recommender Systems conference. The
                 challenge focused on three context-aware recommendation
                 scenarios: time-based, mood-based, and social
                 recommendation. The participants were provided with
                 anonymized datasets from two real-world online movie
                 recommendation communities and competed against each
                 other for obtaining the highest accuracy of
                 recommendations. The datasets contained contextual
                 features, such as tags, annotation, social
                  relationships, and comments, normally not available in
                 public recommendation datasets. More than 40 teams from
                 21 countries participated in the challenge. Their
                 participation was summarized by 10 papers published by
                 the workshop, which have been extended and revised for
                 this special section. In this preface we overview the
                 challenge datasets, tasks, evaluation metrics, and the
                 obtained outcomes.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "13",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Bellogin:2013:ECS,
  author =       "Alejandro Bellog{\'\i}n and Iv{\'a}n Cantador and
                 Fernando D{\'\i}ez and Pablo Castells and Enrique
                 Chavarriaga",
  title =        "An empirical comparison of social, collaborative
                 filtering, and hybrid recommenders",
  journal =      j-TIST,
  volume =       "4",
  number =       "1",
  pages =        "14:1--14:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2414425.2414439",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In the Social Web, a number of diverse recommendation
                 approaches have been proposed to exploit the user
                 generated contents available in the Web, such as
                 rating, tagging, and social networking information. In
                 general, these approaches naturally require the
                 availability of a wide amount of these user
                 preferences. This may represent an important limitation
                 for real applications, and may be somewhat unnoticed in
                 studies focusing on overall precision, in which a
                 failure to produce recommendations gets blurred when
                 averaging the obtained results or, even worse, is just
                 not accounted for, as users with no recommendations are
                 typically excluded from the performance calculations.
                 In this article, we propose a coverage metric that
                 uncovers and compensates for the incompleteness of
                 performance evaluations based only on precision. We use
                 this metric together with precision metrics in an
                 empirical comparison of several social, collaborative
                 filtering, and hybrid recommenders. The obtained
                 results show that a better balance between precision
                 and coverage can be achieved by combining social-based
                 filtering (high accuracy, low coverage) and
                 collaborative filtering (low accuracy, high coverage)
                 recommendation techniques. We thus explore several
                 hybrid recommendation approaches to balance this
                 trade-off. In particular, we compare, on the one hand,
                 techniques integrating collaborative and social
                 information into a single model, and on the other,
                 linear combinations of recommenders. For the last
                 approach, we also propose a novel strategy to
                 dynamically adjust the weight of each recommender on a
                 user-basis, utilizing graph measures as indicators of
                 the target user's connectedness and relevance in a
                 social network.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "14",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Liu:2013:STC,
  author =       "Nathan N. Liu and Luheng He and Min Zhao",
  title =        "Social temporal collaborative ranking for context
                 aware movie recommendation",
  journal =      j-TIST,
  volume =       "4",
  number =       "1",
  pages =        "15:1--15:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2414425.2414440",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Most existing collaborative filtering models only
                 consider the use of user feedback (e.g., ratings) and
                 meta data (e.g., content, demographics). However, in
                 most real world recommender systems, context
                 information, such as time and social networks, are also
                 very important factors that could be considered in
                 order to produce more accurate recommendations. In this
                 work, we address several challenges for the context
                 aware movie recommendation tasks in CAMRa 2010: (1) how
                 to combine multiple heterogeneous forms of user
                 feedback? (2) how to cope with dynamic user and item
                 characteristics? (3) how to capture and utilize social
                 connections among users? For the first challenge, we
                 propose a novel ranking based matrix factorization
                 model to aggregate explicit and implicit user feedback.
                 For the second challenge, we extend this model to a
                 sequential matrix factorization model to enable
                 time-aware parametrization. Finally, we introduce a
                 network regularization function to constrain user
                 parameters based on social connections. To the best of
                 our knowledge, this is the first study that
                 investigates the collective modeling of social and
                 temporal dynamics. Experiments on the CAMRa 2010
                 dataset demonstrated clear improvements over many
                 baselines.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "15",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Shi:2013:MCM,
  author =       "Yue Shi and Martha Larson and Alan Hanjalic",
  title =        "Mining contextual movie similarity with matrix
                 factorization for context-aware recommendation",
  journal =      j-TIST,
  volume =       "4",
  number =       "1",
  pages =        "16:1--16:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2414425.2414441",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Context-aware recommendation seeks to improve
                 recommendation performance by exploiting various
                 information sources in addition to the conventional
                 user-item matrix used by recommender systems. We
                 propose a novel context-aware movie recommendation
                 algorithm based on joint matrix factorization (JMF). We
                 jointly factorize the user-item matrix containing
                 general movie ratings and other contextual movie
                 similarity matrices to integrate contextual information
                 into the recommendation process. The algorithm was
                 developed within the scope of the mood-aware
                 recommendation task that was offered by the Moviepilot
                 mood track of the 2010 context-aware movie
                 recommendation (CAMRa) challenge. Although the
                 algorithm could generalize to other types of contextual
                 information, in this work, we focus on two: movie mood
                 tags and movie plot keywords. Since the objective in
                 this challenge track is to recommend movies for a user
                 given a specified mood, we devise a novel mood-specific
                 movie similarity measure for this purpose. We enhance
                 the recommendation based on this measure by also
                 deploying the second movie similarity measure proposed
                 in this article that takes into account the movie plot
                 keywords. We validate the effectiveness of the proposed
                 JMF algorithm with respect to the recommendation
                 performance by carrying out experiments on the
                 Moviepilot challenge dataset. We demonstrate that
                 exploiting contextual information in JMF leads to
                 significant improvement over several state-of-the-art
                 approaches that generate movie recommendations without
                 using contextual information. We also demonstrate that
                 our proposed mood-specific movie similarity is better
                 suited for the task than the conventional mood-based
                 movie similarity measures. Finally, we show that the
                 enhancement provided by the movie similarity capturing
                 the plot keywords is particularly helpful in improving
                 the recommendation to those users who are significantly
                 more active in rating the movies than other users.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "16",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Okada:2013:MDA,
  author =       "Isamu Okada and Hitoshi Yamamoto",
  title =        "Mathematical description and analysis of adaptive risk
                 choice behavior",
  journal =      j-TIST,
  volume =       "4",
  number =       "1",
  pages =        "17:1--17:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2414425.2414442",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Which risk should one choose when facing alternatives
                 with different levels of risk? We discuss here adaptive
                 processes in such risk choice behavior by generalizing
                  the study of Roos et al. [2010]. We deal with an
                  n-choice game in which every player sequentially chooses
                 n times of lotteries of which there are two types: a
                 safe lottery and a risky lottery. We analyze this model
                 in more detail by elaborating the game. Based on the
                 results of mathematical analysis, replicator dynamics
                 analysis, and numerical simulations, we derived some
                 salient features of risk choice behavior. We show that
                 all the risk strategies can be divided into two groups:
                 persistence and nonpersistence. We also proved that the
                 dynamics with perturbation in which a mutation is
                 installed is globally asymptotically stable to a unique
                 equilibrium point for any initial population. The
                 numerical simulations clarify that the number of
                 persistent strategies seldom increases regardless of
                 the increase in n, and suggest that a rarity of
                 dominant choice strategies is widely observed in many
                 social contexts. These facts not only go hand-in-hand
                 with some well-known insights from prospect theory, but
                 may also provide some theoretical hypotheses for
                 various fields such as behavioral economics, ecology,
                 sociology, and consumer behavioral theory.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "17",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Song:2013:OSM,
  author =       "Xuan Song and Huijing Zhao and Jinshi Cui and Xiaowei
                 Shao and Ryosuke Shibasaki and Hongbin Zha",
  title =        "An online system for multiple interacting targets
                 tracking: Fusion of laser and vision, tracking and
                 learning",
  journal =      j-TIST,
  volume =       "4",
  number =       "1",
  pages =        "18:1--18:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2414425.2414443",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Multitarget tracking becomes significantly more
                 challenging when the targets are in close proximity or
                 frequently interact with each other. This article
                 presents a promising online system to deal with these
                 problems. The novelty of this system is that laser and
                 vision are integrated with tracking and online learning
                 to complement each other in one framework: when the
                 targets do not interact with each other, the
                 laser-based independent trackers are employed and the
                 visual information is extracted simultaneously to train
                 some classifiers online for ``possible interacting
                 targets''. When the targets are in close proximity, the
                 classifiers learned online are used alongside visual
                 information to assist in tracking. Therefore, this mode
                 of cooperation not only deals with various tough
                 problems encountered in tracking, but also ensures that
                 the entire process can be completely online and
                 automatic. Experimental results demonstrate that laser
                 and vision fully display their respective advantages in
                 our system, and it is easy for us to obtain a good
                 trade-off between tracking accuracy and the time-cost
                 factor.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "18",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Chopra:2013:ISS,
  author =       "Amit K. Chopra and Alexander Artikis and Jamal
                 Bentahar and Frank Dignum",
  title =        "Introduction to the special section on agent
                 communication",
  journal =      j-TIST,
  volume =       "4",
  number =       "2",
  pages =        "19:1--19:??",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "19",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Chopra:2013:RDA,
  author =       "Amit K. Chopra and Alexander Artikis and Jamal
                 Bentahar and Marco Colombetti and Frank Dignum and
                 Nicoletta Fornara and Andrew J. I. Jones and Munindar
                 P. Singh and Pinar Yolum",
  title =        "Research directions in agent communication",
  journal =      j-TIST,
  volume =       "4",
  number =       "2",
  pages =        "20:1--20:??",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Increasingly, software engineering involves open
                 systems consisting of autonomous and heterogeneous
                 participants or agents who carry out loosely coupled
                 interactions. Accordingly, understanding and specifying
                 communications among agents is a key concern. A focus
                 on ways to formalize meaning distinguishes agent
                 communication from traditional distributed computing:
                 meaning provides a basis for flexible interactions and
                 compliance checking. Over the years, a number of
                 approaches have emerged with some essential and some
                 irrelevant distinctions drawn among them. As agent
                 abstractions gain increasing traction in the software
                 engineering of open systems, it is important to resolve
                 the irrelevant and highlight the essential
                 distinctions, so that future research can be focused in
                 the most productive directions. This article is an
                 outcome of extensive discussions among agent
                 communication researchers, aimed at taking stock of the
                 field and at developing, criticizing, and refining
                 their positions on specific approaches and future
                 challenges. This article serves some important
                 purposes, including identifying (1) points of broad
                 consensus; (2) points where substantive differences
                 remain; and (3) interesting directions of future
                 work.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "20",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Gerard:2013:FVP,
  author =       "Scott N. Gerard and Munindar P. Singh",
  title =        "Formalizing and verifying protocol refinements",
  journal =      j-TIST,
  volume =       "4",
  number =       "2",
  pages =        "21:1--21:??",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "A (business) protocol describes, in high-level terms,
                 a pattern of communication between two or more
                 participants, specifically via the creation and
                 manipulation of the commitments between them. In this
                 manner, a protocol offers both flexibility and rigor: a
                 participant may communicate in any way it chooses as
                 long as it discharges all of its activated commitments.
                 Protocols thus promise benefits in engineering
                 cross-organizational business processes. However,
                 software engineering using protocols presupposes a
                 formalization of protocols and a notion of the
                 refinement of one protocol by another. Refinement for
                 protocols is both intuitively obvious (e.g.,
                 PayViaCheck is clearly a kind of Pay) and technically
                 nontrivial (e.g., compared to Pay, PayViaCheck involves
                 different participants exchanging different messages).
                 This article formalizes protocols and their refinement.
                 It develops Proton, an analysis tool for protocol
                 specifications that overlays a model checker to compute
                 whether one protocol refines another with respect to a
                 stated mapping. Proton and its underlying theory are
                 evaluated by formalizing several protocols from the
                 literature and verifying all and only the expected
                 refinements.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "21",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Baldoni:2013:CRS,
  author =       "Matteo Baldoni and Cristina Baroglio and Elisa Marengo
                 and Viviana Patti",
  title =        "Constitutive and regulative specifications of
                 commitment protocols: a decoupled approach",
  journal =      j-TIST,
  volume =       "4",
  number =       "2",
  pages =        "22:1--22:??",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Interaction protocols play a fundamental role in
                 multiagent systems. In this work, after analyzing the
                 trends that are emerging not only from research on
                 multiagent interaction protocols but also from
                 neighboring fields, like research on workflows and
                 business processes, we propose a novel definition of
                 commitment-based interaction protocols, that is
                 characterized by the decoupling of the constitutive and
                 the regulative specifications and that explicitly
                 foresees a representation of the latter based on
                 constraints among commitments. A clear distinction
                 between the two representations has many advantages,
                 mainly residing in a greater openness of multiagent
                 systems, and an easier reuse of protocols and of action
                 definitions. A language, named 2CL, for writing
                 regulative specifications is also given together with a
                 designer-oriented graphical notation.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "22",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Falcone:2013:ISS,
  author =       "Rino Falcone and Munindar P. Singh",
  title =        "Introduction to special section on trust in multiagent
                 systems",
  journal =      j-TIST,
  volume =       "4",
  number =       "2",
  pages =        "23:1--23:??",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "23",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2013:FTM,
  author =       "Jie Zhang and Robin Cohen",
  title =        "A framework for trust modeling in multiagent
                 electronic marketplaces with buying advisors to
                 consider varying seller behavior and the limiting of
                 seller bids",
  journal =      j-TIST,
  volume =       "4",
  number =       "2",
  pages =        "24:1--24:??",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In this article, we present a framework of use in
                 electronic marketplaces that allows buying agents to
                 model the trustworthiness of selling agents in an
                 effective way, making use of seller ratings provided by
                 other buying agents known as advisors. The
                 trustworthiness of the advisors is also modeled, using
                 an approach that combines both personal and public
                 knowledge and allows the relative weighting to be
                 adjusted over time. Through a series of experiments
                 that simulate e-marketplaces, including ones where
                 sellers may vary their behavior over time, we are able
                 to demonstrate that our proposed framework delivers
                 effective seller recommendations to buyers, resulting
                 in important buyer profit. We also propose limiting
                 seller bids as a method for promoting seller honesty,
                 thus facilitating successful selection of sellers by
                 buyers, and demonstrate the value of this approach
                 through experimental results. Overall, this research is
                 focused on the technological aspects of electronic
                 commerce and specifically on technology that would be
                 used to manage trust.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "24",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Erriquez:2013:BUS,
  author =       "Elisabetta Erriquez and Wiebe van der Hoek and Michael
                 Wooldridge",
  title =        "Building and using social structures: a case study
                 using the agent {ART} testbed",
  journal =      j-TIST,
  volume =       "4",
  number =       "2",
  pages =        "25:1--25:??",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "This article investigates the conjecture that agents
                 who make decisions in scenarios where trust is
                 important can benefit from the use of a social
                 structure, representing the social relationships that
                 exist between agents. We propose techniques that can be
                 used by agents to initially build and then
                 progressively update such a structure in the light of
                 experience. We describe an implementation of our
                 techniques in the domain of the Agent ART testbed: we
                 take two existing agents for this domain (``Simplet''
                 and ``Connected'') and compare their performance with
                 versions that use our social structure
                 (``SocialSimplet'' and ``SocialConnected''). We show
                 that SocialSimplet and SocialConnected outperform their
                 counterparts with respect to the quality of the
                 interactions, the number of rounds won in a
                 competition, and the total utility gained.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "25",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Burnett:2013:STB,
  author =       "Chris Burnett and Timothy J. Norman and Katia Sycara",
  title =        "Stereotypical trust and bias in dynamic multiagent
                 systems",
  journal =      j-TIST,
  volume =       "4",
  number =       "2",
  pages =        "26:1--26:??",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Large-scale multiagent systems have the potential to
                 be highly dynamic. Trust and reputation are crucial
                 concepts in these environments, as it may be necessary
                 for agents to rely on their peers to perform as
                 expected, and learn to avoid untrustworthy partners.
                 However, aspects of highly dynamic systems introduce
                 issues which make the formation of trust relationships
                 difficult. For example, they may be short-lived,
                 precluding agents from gaining the necessary
                 experiences to make an accurate trust evaluation. This
                 article describes a new approach, inspired by theories
                 of human organizational behavior, whereby agents
                 generalize their experiences with previously
                 encountered partners as stereotypes, based on the
                 observable features of those partners and their
                 behaviors. Subsequently, these stereotypes are applied
                 when evaluating new and unknown partners. Furthermore,
                 these stereotypical opinions can be communicated within
                 the society, resulting in the notion of stereotypical
                 reputation. We show how this approach can complement
                 existing state-of-the-art trust models, and enhance the
                 confidence in the evaluations that can be made about
                 trustees when direct and reputational information is
                 lacking or limited. Furthermore, we show how a
                 stereotyping approach can help agents detect unwanted
                 biases in the reputational opinions they receive from
                 others in the society.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "26",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Falcone:2013:MKR,
  author =       "Rino Falcone and Michele Piunti and Matteo Venanzi and
                 Cristiano Castelfranchi",
  title =        "From manifesta to krypta: The relevance of categories
                 for trusting others",
  journal =      j-TIST,
  volume =       "4",
  number =       "2",
  pages =        "27:1--27:??",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In this article we consider the special abilities
                 needed by agents for assessing trust based on inference
                 and reasoning. We analyze the case in which it is
                 possible to infer trust towards unknown counterparts by
                 reasoning on abstract classes or categories of agents
                 shaped in a concrete application domain. We present a
                 scenario of interacting agents providing a
                 computational model implementing different strategies
                 to assess trust. Assuming a medical domain, categories,
                 including both competencies and dispositions of
                 possible trustees, are exploited to infer trust towards
                 possibly unknown counterparts. The proposed approach
                 for the cognitive assessment of trust relies on agents'
                 abilities to analyze heterogeneous information sources
                 along different dimensions. Trust is inferred based on
                 specific observable properties (manifesta), namely
                 explicitly readable signals indicating internal
                 features (krypta) regulating agents' behavior and
                 effectiveness on specific tasks. Simulative experiments
                 evaluate the performance of trusting agents adopting
                 different strategies to delegate tasks to possibly
                 unknown trustees, while experimental results show the
                 relevance of this kind of cognitive ability in the case
                 of open multiagent systems.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "27",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Li:2013:ISS,
  author =       "Qing Li and Xiangfeng Luo and Liu Wenyin and Cristina
                 Conati",
  title =        "Introduction to the special section on intelligent
                 tutoring and coaching systems",
  journal =      j-TIST,
  volume =       "4",
  number =       "2",
  pages =        "28:1--28:??",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "28",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Folsom-Kovarik:2013:TPR,
  author =       "Jeremiah T. Folsom-Kovarik and Gita Sukthankar and Sae
                 Schatz",
  title =        "Tractable {POMDP} representations for intelligent
                 tutoring systems",
  journal =      j-TIST,
  volume =       "4",
  number =       "2",
  pages =        "29:1--29:??",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "With Partially Observable Markov Decision Processes
                 (POMDPs), Intelligent Tutoring Systems (ITSs) can model
                 individual learners from limited evidence and plan
                 ahead despite uncertainty. However, POMDPs need
                 appropriate representations to become tractable in ITSs
                 that model many learner features, such as mastery of
                 individual skills or the presence of specific
                 misconceptions. This article describes two POMDP
                 representations --- state queues and observation chains ---
                 that take advantage of ITS task properties and let
                 POMDPs scale to represent over 100 independent learner
                 features. A real-world military training problem is
                 given as one example. A human study (n = 14) provides
                 initial validation for the model construction. Finally,
                 evaluating the experimental representations with
                 simulated students helps predict their impact on ITS
                 performance. The compressed representations can model a
                 wide range of simulated problems with instructional
                 efficacy equal to lossless representations. With
                 improved tractability, POMDP ITSs can accommodate more
                 numerous or more detailed learner states and inputs.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "29",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Yen:2013:LIS,
  author =       "Neil Y. Yen and Timothy K. Shih and Qun Jin",
  title =        "{LONET}: an interactive search network for intelligent
                 lecture path generation",
  journal =      j-TIST,
  volume =       "4",
  number =       "2",
  pages =        "30:1--30:??",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Sharing resources and information on the Internet has
                 become an important activity for education. In distance
                 learning, instructors can benefit from resources, also
                 known as Learning Objects (LOs), to create plenteous
                 materials for specific learning purposes. Our
                 repository (called the MINE Registry) has been
                 developed for storing and sharing learning objects,
                 around 22,000 in total, in the past few years. To
                 enhance reusability, one significant concept named
                 Reusability Tree was implemented to trace the process
                 of changes. Also, weighting and ranking metrics have
                 been proposed to enhance the searchability in the
                 repository. Following the successful implementation,
                 this study goes further to investigate the
                 relationships between LOs from a perspective of social
                 networks. The LONET (Learning Object Network), as an
                 extension of Reusability Tree, is newly proposed and
                 constructed to clarify the vague reuse scenario in the
                 past, and to summarize collaborative intelligence
                 through past interactive usage experiences. We define a
                 social structure in our repository based on past usage
                 experiences from instructors, by proposing a set of
                 metrics to evaluate the interdependency such as
                 prerequisites and references. The structure identifies
                 usage experiences and can be graphed in terms of
                 implicit and explicit relations among learning objects.
                 As a practical contribution, an adaptive algorithm is
                 proposed to mine the social structure in our
                 repository. The algorithm generates adaptive routes,
                 based on past usage experiences, by computing possible
                 interactive input, such as search criteria and feedback
                 from instructors, and assists them in generating
                 specific lectures.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "30",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Ehara:2013:PRS,
  author =       "Yo Ehara and Nobuyuki Shimizu and Takashi Ninomiya and
                 Hiroshi Nakagawa",
  title =        "Personalized reading support for second-language {Web}
                 documents",
  journal =      j-TIST,
  volume =       "4",
  number =       "2",
  pages =        "31:1--31:??",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "A novel intelligent interface eases the browsing of
                 Web documents written in the second languages of users.
                 It automatically predicts words unfamiliar to the user
                 by a collective intelligence method and glosses them
                 with their meaning in advance. If the prediction
                 succeeds, the user does not need to consult a
                 dictionary; even if it fails, the user can correct the
                 prediction. The correction data are collected and used
                 to improve the accuracy of further predictions. The
                 prediction is personalized in that every user's
                 language ability is estimated by a state-of-the-art
                 language testing model, which is trained in a practical
                 response time with only a small sacrifice of prediction
                 accuracy. The system was evaluated in terms of
                 prediction accuracy and reading simulation. The reading
                 simulation results show that this system can reduce the
                 number of clicks for most readers with insufficient
                 vocabulary to read documents and can significantly
                 reduce the remaining number of unfamiliar words after
                 the prediction and glossing for all users.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "31",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Wang:2013:RCI,
  author =       "Fei-Yue Wang and Pak Kin Wong",
  title =        "Research commentary: Intelligent systems and
                 technology for integrative and predictive medicine: an
                 {ACP} approach",
  journal =      j-TIST,
  volume =       "4",
  number =       "2",
  pages =        "32:1--32:??",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "One of the principal goals in medicine is to determine
                 and implement the best treatment for patients through
                 fastidious estimation of the effects and benefits of
                 therapeutic procedures. The inherent complexities of
                 physiological and pathological networks that span
                 across orders of magnitude in time and length scales,
                 however, represent fundamental hurdles in determining
                 effective treatments for patients. Here we argue for a
                 new approach, called the ACP-based approach, that
                 combines artificial (societies), computational
                 (experiments), and parallel (execution) methods in
                 intelligent systems and technology for integrative and
                 predictive medicine, or more generally, precision
                 medicine and smart health management. The advent of
                 artificial societies that collect the clinically
                 relevant information in prognostics and therapeutics
                 provides a promising platform for organizing and
                 experimenting complex physiological systems toward
                 integrative medicine. The ability of computational
                 experiments to analyze distinct, interactive systems
                 such as the host mechanisms, pathological pathways, and
                 therapeutic strategies, as well as other factors using
                 the artificial systems, will enable control and
                 management through parallel execution of real and
                 artificial systems concurrently within the integrative
                 medicine context. The development of this framework in
                 integrative medicine, fueled by close collaborations
                 between physicians, engineers, and scientists, will
                 result in preventive and predictive practices of a
                 personal, proactive, and precise nature, including
                 rational combinatorial treatments, adaptive
                 therapeutics, and patient-oriented disease
                 management.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "32",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 4(2), March 2013, article no. 33 (pages 33:1--33:??).
%%% NOTE(review): no DOI field recorded for this issue's entries --
%%% confirm against the ACM Digital Library before adding one.
@Article{Tabia:2013:PBA,
  author =       "Hedi Tabia and Mohamed Daoudi and Jean-Philippe
                 Vandeborre and Olivier Colot",
  title =        "A parts-based approach for automatic {$3$D} shape
                 categorization using belief functions",
  journal =      j-TIST,
  volume =       "4",
  number =       "2",
  pages =        "33:1--33:??",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Grouping 3D objects into (semantically) meaningful
                 categories is a challenging and important problem in 3D
                 mining and shape processing. Here, we present a novel
                 approach to categorize 3D objects. The method described
                 in this article, is a belief-function-based approach
                 and consists of two stages: the training stage, where
                 3D objects in the same category are processed and a set
                 of representative parts is constructed, and the
                 labeling stage, where unknown objects are categorized.
                 The experimental results obtained on the Tosca-Sumner
                 and the Shrec07 datasets show that the system
                 efficiently performs in categorizing 3D models.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "33",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 4(2), March 2013, article no. 34.  Image-To-Class (I2C)
%%% distance metric learning.  NOTE(review): no DOI field recorded --
%%% confirm against the ACM Digital Library.
@Article{Wang:2013:LIC,
  author =       "Zhengxiang Wang and Yiqun Hu and Liang-Tien Chia",
  title =        "Learning image-to-class distance metric for image
                 classification",
  journal =      j-TIST,
  volume =       "4",
  number =       "2",
  pages =        "34:1--34:??",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Image-To-Class (I2C) distance is a novel distance used
                 for image classification and has successfully handled
                 datasets with large intra-class variances. However, it
                 uses Euclidean distance for measuring the distance
                 between local features in different classes, which may
                 not be the optimal distance metric in real image
                 classification problems. In this article, we propose a
                 distance metric learning method to improve the
                 performance of I2C distance by learning per-class
                 Mahalanobis metrics in a large margin framework. Our
                 I2C distance is adaptive to different classes by
                 combining with the learned metric for each class. These
                 multiple per-class metrics are learned simultaneously
                 by forming a convex optimization problem with the
                 constraints that the I2C distance from each training
                 image to its belonging class should be less than the
                 distances to other classes by a large margin. A
                 subgradient descent method is applied to efficiently
                 solve this optimization problem. For efficiency and
                 scalability to large-scale problems, we also show how
                 to simplify the method to learn a diagonal matrix for
                 each class. We show in experiments that our learned
                 Mahalanobis I2C distance can significantly outperform
                 the original Euclidean I2C distance as well as other
                 distance metric learning methods in several prevalent
                 image datasets, and our simplified diagonal matrices
                 can preserve the performance but significantly speed up
                 the metric learning procedure for large-scale datasets.
                 We also show in experiment that our method is able to
                 correct the class imbalance problem, which usually
                 leads the NN-based methods toward classes containing
                 more training images.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "34",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 4(2), March 2013, article no. 35.  Online unsupervised
%%% laser-based surveillance (tracking, scene learning, abnormality
%%% detection).  NOTE(review): no DOI field recorded -- confirm
%%% against the ACM Digital Library.
@Article{Song:2013:FOU,
  author =       "Xuan Song and Xiaowei Shao and Quanshi Zhang and
                 Ryosuke Shibasaki and Huijing Zhao and Jinshi Cui and
                 Hongbin Zha",
  title =        "A fully online and unsupervised system for large and
                 high-density area surveillance: Tracking, semantic
                 scene learning and abnormality detection",
  journal =      j-TIST,
  volume =       "4",
  number =       "2",
  pages =        "35:1--35:??",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "For reasons of public security, an intelligent
                 surveillance system that can cover a large, crowded
                 public area has become an urgent need. In this article,
                 we propose a novel laser-based system that can
                 simultaneously perform tracking, semantic scene
                 learning, and abnormality detection in a fully online
                 and unsupervised way. Furthermore, these three tasks
                 cooperate with each other in one framework to improve
                 their respective performances. The proposed system has
                 the following key advantages over previous ones: (1) It
                 can cover quite a large area (more than 60$ \times
                 $35m), and simultaneously perform robust tracking,
                 semantic scene learning, and abnormality detection in a
                 high-density situation. (2) The overall system can vary
                 with time, incrementally learn the structure of the
                 scene, and perform fully online abnormal activity
                 detection and tracking. This feature makes our system
                 suitable for real-time applications. (3) The
                 surveillance tasks are carried out in a fully
                 unsupervised manner, so that there is no need for
                 manual labeling and the construction of huge training
                 datasets. We successfully apply the proposed system to
                 the JR subway station in Tokyo, and demonstrate that it
                 can cover an area of 60$ \times $35m, robustly track
                 more than 150 targets at the same time, and
                 simultaneously perform online semantic scene learning
                 and abnormality detection with no human intervention.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "35",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 4(2), March 2013, article no. 36.  Abstract repaired: the
%%% extraction artifacts ``C pA( H )'' and ``CpA( H )'' are normalized
%%% to the planner name CpA(H) as spelled in the title field, and the
%%% line break splitting ``6'' from ``$^{th}$'' is removed so the
%%% ordinal renders without a stray interior space.
%%% NOTE(review): no DOI field recorded -- confirm against the ACM DL.
@Article{Tran:2013:CPB,
  author =       "Vien Tran and Khoi Nguyen and Tran Cao Son and Enrico
                 Pontelli",
  title =        "A conformant planner based on approximation:
                 {CpA(H)}",
  journal =      j-TIST,
  volume =       "4",
  number =       "2",
  pages =        "36:1--36:??",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Sun May 5 09:06:55 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "This article describes the planner CpA(H), the
                 recipient of the Best Nonobservable Nondeterministic
                 Planner Award in the ``Uncertainty Track'' of the
                 6$^{th}$ International Planning Competition (IPC),
                 2008. The article presents the various techniques that
                 help CpA(H) to achieve the level of performance and
                 scalability exhibited in the competition. The article
                 also presents experimental results comparing CpA(H)
                 with state-of-the-art conformant planners.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "36",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% Editorial introduction to the paraphrasing special section:
%%% TIST 4(3), June 2013, article no. 37 (no abstract supplied).
%%% Values are brace-delimited; content is unchanged.
@article{Wang:2013:ISS,
  author =       {Haifeng Wang and Bill Dolan and Idan Szpektor and
                 Shiqi Zhao},
  title =        {Introduction to special section on paraphrasing},
  journal =      j-TIST,
  volume =       {4},
  number =       {3},
  pages =        {37:1--37:??},
  month =        jun,
  year =         {2013},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/2483669.2483670},
  ISSN =         {2157-6904 (print), 2157-6912 (electronic)},
  ISSN-L =       {2157-6904},
  bibdate =      {Thu Mar 13 07:29:09 MDT 2014},
  bibsource =    {http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib},
  acknowledgement = ack-nhfb,
  ajournal =     {ACM Trans. Intell. Syst. Technol.},
  articleno =    {37},
  fjournal =     {ACM Transactions on Intelligent Systems and Technology
                 (TIST)},
  journal-URL =  {http://portal.acm.org/citation.cfm?id=J1318},
}

%%% TIST 4(3), June 2013, article no. 38 (paraphrasing special
%%% section; see the introduction, article no. 37).
@Article{Resnik:2013:UTP,
  author =       "Philip Resnik and Olivia Buzek and Yakov Kronrod and
                 Chang Hu and Alexander J. Quinn and Benjamin B.
                 Bederson",
  title =        "Using targeted paraphrasing and monolingual
                 crowdsourcing to improve translation",
  journal =      j-TIST,
  volume =       "4",
  number =       "3",
  pages =        "38:1--38:??",
  month =        jun,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2483669.2483671",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Targeted paraphrasing is a new approach to the problem
                 of obtaining cost-effective, reasonable quality
                 translation, which makes use of simple and inexpensive
                 human computations by monolingual speakers in
                 combination with machine translation. The key insight
                 behind the process is that it is possible to spot
                 likely translation errors with only monolingual
                 knowledge of the target language, and it is possible to
                 generate alternative ways to say the same thing (i.e.,
                 paraphrases) with only monolingual knowledge of the
                 source language. Formal evaluation demonstrates that
                 this approach can yield substantial improvements in
                 translation quality, and the idea has been integrated
                 into a broader framework for monolingual collaborative
                 translation that produces fully accurate, fully fluent
                 translations for a majority of sentences in a
                 real-world translation task, with no involvement of
                 human bilingual speakers.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "38",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 4(3), June 2013, article no. 39 (paraphrasing special
%%% section).  Abstract repaired: the extraction artifact ``2 B leu
%%% points'' (a split small-caps rendering) is normalized to ``2 BLEU
%%% points'', matching the spelling used in the sibling entry
%%% Madnani:2013:GTP.
@Article{Marton:2013:DPP,
  author =       "Yuval Marton",
  title =        "Distributional phrasal paraphrase generation for
                 statistical machine translation",
  journal =      j-TIST,
  volume =       "4",
  number =       "3",
  pages =        "39:1--39:??",
  month =        jun,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2483669.2483672",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Paraphrase generation has been shown useful for
                 various natural language processing tasks, including
                 statistical machine translation. A commonly used method
                 for paraphrase generation is pivoting [Callison-Burch
                 et al. 2006], which benefits from linguistic knowledge
                 implicit in the sentence alignment of parallel texts,
                 but has limited applicability due to its reliance on
                 parallel texts. Distributional paraphrasing [Marton et
                 al. 2009a] has wider applicability, is more
                 language-independent, but doesn't benefit from any
                 linguistic knowledge. Nevertheless, we show that using
                 distributional paraphrasing can yield greater gains in
                 translation tasks. We report method improvements
                 leading to higher gains than previously published, of
                 almost 2 BLEU points, and provide implementation
                 details, complexity analysis, and further insight into
                 this method.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "39",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 4(3), June 2013, article no. 40 (paraphrasing special
%%% section).  NOTE(review): the abstract sentence ``...as to how good
%%% the translations.'' reads as truncated; verify against the
%%% publisher's copy before altering the quoted text.
@Article{Madnani:2013:GTP,
  author =       "Nitin Madnani and Bonnie J. Dorr",
  title =        "Generating targeted paraphrases for improved
                 translation",
  journal =      j-TIST,
  volume =       "4",
  number =       "3",
  pages =        "40:1--40:??",
  month =        jun,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2483669.2483673",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Today's Statistical Machine Translation (SMT) systems
                 require high-quality human translations for parameter
                 tuning, in addition to large bitexts for learning the
                 translation units. This parameter tuning usually
                 involves generating translations at different points in
                 the parameter space and obtaining feedback against
                 human-authored reference translations as to how good
                 the translations. This feedback then dictates what
                 point in the parameter space should be explored next.
                 To measure this feedback, it is generally considered
                 wise to have multiple (usually 4) reference
                 translations to avoid unfair penalization of
                 translation hypotheses which could easily happen given
                 the large number of ways in which a sentence can be
                 translated from one language to another. However, this
                 reliance on multiple reference translations creates a
                 problem since they are labor intensive and expensive to
                 obtain. Therefore, most current MT datasets only
                 contain a single reference. This leads to the problem
                 of reference sparsity. In our previously published
                 research, we had proposed the first paraphrase-based
                 solution to this problem and evaluated its effect on
                 Chinese--English translation. In this article, we first
                 present extended results for that solution on
                 additional source languages. More importantly, we
                 present a novel way to generate ``targeted''
                 paraphrases that yields substantially larger gains (up
                 to 2.7 BLEU points) in translation quality when
                 compared to our previous solution (up to 1.6 BLEU
                 points). In addition, we further validate these
                 improvements by supplementing with human preference
                 judgments obtained via Amazon Mechanical Turk.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "40",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 4(3), June 2013, article no. 41 (paraphrasing special
%%% section).
@Article{Cohn:2013:AAS,
  author =       "Trevor Cohn and Mirella Lapata",
  title =        "An abstractive approach to sentence compression",
  journal =      j-TIST,
  volume =       "4",
  number =       "3",
  pages =        "41:1--41:??",
  month =        jun,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2483669.2483674",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In this article we generalize the sentence compression
                 task. Rather than simply shorten a sentence by deleting
                 words or constituents, as in previous work, we rewrite
                 it using additional operations such as substitution,
                 reordering, and insertion. We present an experimental
                 study showing that humans can naturally create
                 abstractive sentences using a variety of rewrite
                 operations, not just deletion. We next create a new
                 corpus that is suited to the abstractive compression
                 task and formulate a discriminative tree-to-tree
                 transduction model that can account for structural and
                 lexical mismatches. The model incorporates a grammar
                 extraction method, uses a language model for coherent
                 output, and can be easily tuned to a wide range of
                 compression-specific loss functions.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "41",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 4(3), June 2013, article no. 42 (paraphrasing special
%%% section).
@Article{Moon:2013:IBM,
  author =       "Taesun Moon and Katrin Erk",
  title =        "An inference-based model of word meaning in context as
                 a paraphrase distribution",
  journal =      j-TIST,
  volume =       "4",
  number =       "3",
  pages =        "42:1--42:??",
  month =        jun,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2483669.2483675",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Graded models of word meaning in context characterize
                 the meaning of individual usages (occurrences) without
                 reference to dictionary senses. We introduce a novel
                 approach that frames the task of computing word meaning
                 in context as a probabilistic inference problem. The
                 model represents the meaning of a word as a probability
                 distribution over potential paraphrases, inferred using
                 an undirected graphical model. Evaluated on
                 paraphrasing tasks, the model achieves state-of-the-art
                 performance.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "42",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 4(3), June 2013, article no. 43 (paraphrasing special
%%% section).  NOTE(review): ``a 18\%'' in the abstract is reproduced
%%% as quoted; leave unless the publisher's copy differs.
@Article{Burrows:2013:PAC,
  author =       "Steven Burrows and Martin Potthast and Benno Stein",
  title =        "Paraphrase acquisition via crowdsourcing and machine
                 learning",
  journal =      j-TIST,
  volume =       "4",
  number =       "3",
  pages =        "43:1--43:??",
  month =        jun,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2483669.2483676",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "To paraphrase means to rewrite content while
                 preserving the original meaning. Paraphrasing is
                 important in fields such as text reuse in journalism,
                 anonymizing work, and improving the quality of
                 customer-written reviews. This article contributes to
                 paraphrase acquisition and focuses on two aspects that
                 are not addressed by current research: (1) acquisition
                 via crowdsourcing, and (2) acquisition of passage-level
                 samples. The challenge of the first aspect is automatic
                 quality assurance; without such a means the
                 crowdsourcing paradigm is not effective, and without
                 crowdsourcing the creation of test corpora is
                 unacceptably expensive for realistic order of
                 magnitudes. The second aspect addresses the deficit
                 that most of the previous work in generating and
                 evaluating paraphrases has been conducted using
                 sentence-level paraphrases or shorter; these
                 short-sample analyses are limited in terms of
                 application to plagiarism detection, for example. We
                 present the Webis Crowd Paraphrase Corpus 2011
                 (Webis-CPC-11), which recently formed part of the PAN
                 2010 international plagiarism detection competition.
                 This corpus comprises passage-level paraphrases with
                 4067 positive samples and 3792 negative samples that
                 failed our criteria, using Amazon's Mechanical Turk for
                 crowdsourcing. In this article, we review the lessons
                 learned at PAN 2010, and explain in detail the method
                 used to construct the corpus. The empirical
                 contributions include machine learning experiments to
                 explore if passage-level paraphrases can be identified
                 in a two-class classification problem using paraphrase
                 similarity features, and we find that a
                 k-nearest-neighbor classifier can correctly distinguish
                 between paraphrased and nonparaphrased samples with
                 0.980 precision at 0.523 recall. This result implies
                 that just under half of our samples must be discarded
                 (remaining 0.477 fraction), but our cost analysis shows
                 that the automation we introduce results in a 18\%
                 financial saving and over 100 hours of time returned to
                 the researchers when repeating a similar corpus design.
                 On the other hand, when building an unrelated corpus
                 requiring, say, 25\% training data for the automated
                 component, we show that the financial outcome is cost
                 neutral, while still returning over 70 hours of time to
                 the researchers. The work presented here is the first
                 to join the paraphrasing and plagiarism communities.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "43",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 4(3), June 2013, article no. 44 (paraphrasing special
%%% section).  Author fix: ``Aur{\'e}elien Max'' had a doubled ``e''
%%% after the accent group; the given name is Aur{\'e}lien.
@Article{Bouamor:2013:MPA,
  author =       "Houda Bouamor and Aur{\'e}lien Max and Anne Vilnat",
  title =        "Multitechnique paraphrase alignment: a contribution to
                 pinpointing sub-sentential paraphrases",
  journal =      j-TIST,
  volume =       "4",
  number =       "3",
  pages =        "44:1--44:??",
  month =        jun,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2483669.2483677",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "This work uses parallel monolingual corpora for a
                 detailed study of the task of sub-sentential paraphrase
                 acquisition. We argue that the scarcity of this type of
                 resource is compensated by the fact that it is the most
                 suited type for studies on paraphrasing. We propose a
                 large exploration of this task with experiments on two
                 languages with five different acquisition techniques,
                 selected for their complementarity, their combinations,
                 as well as four monolingual corpus types of varying
                 comparability. We report, under all conditions, a
                 significant improvement over all techniques by
                 validating candidate paraphrases using a maximum
                 entropy classifier. An important result of our study is
                 the identification of difficult-to-acquire paraphrase
                 pairs, which are classified and quantified in a
                 bilingual typology.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "44",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% Editorial introduction to the socially-aware-computing special
%%% section: TIST 4(3), June 2013, article no. 45 (no abstract
%%% supplied).  Values are brace-delimited; content is unchanged.
@article{Yu:2013:ISS,
  author =       {Zhiwen Yu and Daqing Zhang and Nathan Eagle and Diane
                 Cook},
  title =        {Introduction to the special section on intelligent
                 systems for socially aware computing},
  journal =      j-TIST,
  volume =       {4},
  number =       {3},
  pages =        {45:1--45:??},
  month =        jun,
  year =         {2013},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/2483669.2483678},
  ISSN =         {2157-6904 (print), 2157-6912 (electronic)},
  ISSN-L =       {2157-6904},
  bibdate =      {Thu Mar 13 07:29:09 MDT 2014},
  bibsource =    {http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib},
  acknowledgement = ack-nhfb,
  ajournal =     {ACM Trans. Intell. Syst. Technol.},
  articleno =    {45},
  fjournal =     {ACM Transactions on Intelligent Systems and Technology
                 (TIST)},
  journal-URL =  {http://portal.acm.org/citation.cfm?id=J1318},
}

%%% TIST 4(3), June 2013, article no. 46 (follows the
%%% socially-aware-computing section introduction, article no. 45).
@Article{Schuster:2013:PSC,
  author =       "Daniel Schuster and Alberto Rosi and Marco Mamei and
                 Thomas Springer and Markus Endler and Franco
                 Zambonelli",
  title =        "Pervasive social context: Taxonomy and survey",
  journal =      j-TIST,
  volume =       "4",
  number =       "3",
  pages =        "46:1--46:??",
  month =        jun,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2483669.2483679",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "As pervasive computing meets social networks, there is
                 a fast growing research field called pervasive social
                 computing. Applications in this area exploit the
                 richness of information arising out of people using
                 sensor-equipped pervasive devices in their everyday
                 life combined with intense use of different social
                 networking services. We call this set of information
                 pervasive social context. We provide a taxonomy to
                 classify pervasive social context along the dimensions
                 space, time, people, and information source (STiPI) as
                 well as commenting on the type and reason for creating
                 such context. A survey of recent research shows the
                 applicability and usefulness of the taxonomy in
                 classifying and assessing applications and systems in
                 the area of pervasive social computing. Finally, we
                 present some research challenges in this area and
                 illustrate how they affect the systems being
                 surveyed.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "46",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Shi:2013:NLR,
  author =       "Yue Shi and Pavel Serdyukov and Alan Hanjalic and
                 Martha Larson",
  title =        "Nontrivial landmark recommendation using geotagged
                 photos",
  journal =      j-TIST,
  volume =       "4",
  number =       "3",
  pages =        "47:1--47:??",
  month =        jun,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2483669.2483680",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Online photo-sharing sites provide a wealth of
                 information about user behavior and their potential is
                 increasing as it becomes ever-more common for images to
                 be associated with location information in the form of
                 geotags. In this article, we propose a novel approach
                 that exploits geotagged images from an online community
                 for the purpose of personalized landmark
                 recommendation. Under our formulation of the task,
                 recommended landmarks should be relevant to user
                 interests and additionally they should constitute
                 nontrivial recommendations. In other words,
                 recommendations of landmarks that are highly popular
                 and frequently visited and can be easily discovered
                 through other information sources such as travel guides
                 should be avoided in favor of recommendations that
                 relate to users' personal interests. We propose a
                 collaborative filtering approach to the personalized
                 landmark recommendation task within a matrix
                 factorization framework. Our approach, WMF-CR, combines
                 weighted matrix factorization and category-based
                 regularization. The integrated weights emphasize the
                 contribution of nontrivial landmarks in order to focus
                 the recommendation model specifically on the generation
                 of nontrivial recommendations. They support the
                 judicious elimination of trivial landmarks from
                 consideration without also discarding information
                 valuable for recommendation. Category-based
                 regularization addresses the sparse data problem, which
                 is arguably even greater in the case of our landmark
                 recommendation task than in other recommendation
                 scenarios due to the limited amount of travel
                 experience recorded in the online image set of any
                 given user. We use category information extracted from
                 Wikipedia in order to provide the system with a method
                 to generalize the semantics of landmarks and allow the
                 model to relate them not only on the basis of identity,
                 but also on the basis of topical commonality. The
                 proposed approach is computationally scalable, that is,
                 its complexity is linear with the number of observed
                 preferences in the user-landmark preference matrix and
                 the number of nonzero similarities in the
                 category-based landmark similarity matrix. We evaluate
                 the approach on a large collection of geotagged photos
                 gathered from Flickr. Our experimental results
                 demonstrate that WMF-CR outperforms several
                 state-of-the-art baseline approaches in recommending
                 nontrivial landmarks. Additionally, they demonstrate
                 that the approach is well suited for addressing data
                 sparseness and provides particular performance
                 improvement in the case of users who have limited
                 travel experience, that is, have visited only few
                 cities or few landmarks.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "47",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Wei:2013:EPA,
  author =       "Ling-Yin Wei and Wen-Chih Peng and Wang-Chien Lee",
  title =        "Exploring pattern-aware travel routes for trajectory
                 search",
  journal =      j-TIST,
  volume =       "4",
  number =       "3",
  pages =        "48:1--48:??",
  month =        jun,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2483669.2483681",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "With the popularity of positioning devices, Web 2.0
                 technology, and trip sharing services, many users are
                 willing to log and share their trips on the Web. Thus,
                 trip planning Web sites are able to provide some new
                 services by inferring Regions-Of-Interest (ROIs) and
                 recommending popular travel routes from trip
                 trajectories. We argue that simply providing some
                 travel routes consisting of popular ROIs to users is
                 not sufficient. To tour around a wide geographical
                 area, for example, a city, some users may prefer a trip
                 to visit as many ROIs as possible, while others may
                 like to stop by only a few ROIs for an in-depth visit.
                 We refer to a trip fitting the former user group as an
                 in-breadth trip and a trip suitable for the latter user
                 group as an in-depth trip. Prior studies on trip
                 planning have focused on mining ROIs and travel routes
                 without considering these different preferences. In
                 this article, given a spatial range and a user
                 preference of depth/breadth specified by a user, we
                 develop a Pattern-Aware Trajectory Search (PATS)
                 framework to retrieve the top K trajectories passing
                 through popular ROIs. PATS is novel because the
                 returned travel trajectories, discovered from travel
                 patterns hidden in trip trajectories, may represent the
                 most valuable travel experiences of other travelers
                 fitting the user's trip preference in terms of depth or
                 breadth. The PATS framework comprises two components:
                 travel behavior exploration and trajectory search. The
                 travel behavior exploration component determines a set
                 of ROIs along with their attractive scores by
                 considering not only the popularity of the ROIs but
                 also the travel sequential relationships among the
                 ROIs. To capture the travel sequential relationships
                 among ROIs and to derive their attractive scores, a
                 user movement graph is constructed. For the trajectory
                 search component of PATS, we formulate two trajectory
                 score functions, the depth-trip score function and the
                 breadth-trip score function, by taking into account the
                 number of ROIs in a trajectory and their attractive
                 scores. Accordingly, we propose an algorithm, namely,
                 Bounded Trajectory Search (BTS), to efficiently
                 retrieve the top K trajectories based on the two
                 trajectory scores. The PATS framework is evaluated by
                 experiments and user studies using a real dataset. The
                 experimental results demonstrate the effectiveness and
                 the efficiency of the proposed PATS framework.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "48",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Yan:2013:STM,
  author =       "Zhixian Yan and Dipanjan Chakraborty and Christine
                 Parent and Stefano Spaccapietra and Karl Aberer",
  title =        "Semantic trajectories: Mobility data computation and
                 annotation",
  journal =      j-TIST,
  volume =       "4",
  number =       "3",
  pages =        "49:1--49:??",
  month =        jun,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2483669.2483682",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "With the large-scale adoption of GPS equipped mobile
                 sensing devices, positional data generated by moving
                 objects (e.g., vehicles, people, animals) are being
                 easily collected. Such data are typically modeled as
                 streams of spatio-temporal (x,y,t) points, called
                 trajectories. In recent years trajectory management
                 research has progressed significantly towards efficient
                 storage and indexing techniques, as well as suitable
                 knowledge discovery. These works focused on the
                 geometric aspect of the raw mobility data. We are now
                 witnessing a growing demand in several application
                 sectors (e.g., from shipment tracking to geo-social
                 networks) on understanding the semantic behavior of
                 moving objects. Semantic behavior refers to the use of
                 semantic abstractions of the raw mobility data,
                 including not only geometric patterns but also
                 knowledge extracted jointly from the mobility data and
                 the underlying geographic and application domains
                 information. The core contribution of this article lies
                 in a semantic model and a computation and annotation
                 platform for developing a semantic approach that
                 progressively transforms the raw mobility data into
                 semantic trajectories enriched with segmentations and
                 annotations. We also analyze a number of experiments we
                 did with semantic trajectories in different domains.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "49",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Chin:2013:CPT,
  author =       "Alvin Chin and Bin Xu and Hao Wang and Lele Chang and
                 Hao Wang and Lijun Zhu",
  title =        "Connecting people through physical proximity and
                 physical resources at a conference",
  journal =      j-TIST,
  volume =       "4",
  number =       "3",
  pages =        "50:1--50:??",
  month =        jun,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2483669.2483683",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "This work investigates how to bridge the gap between
                 offline and online behaviors at a conference and how
                 the physical resources in the conference (the physical
                 objects used in the conference for gathering attendees
                 together in engaging an activity such as rooms,
                 sessions, and papers) can be used to help facilitate
                 social networking. We build Find and Connect, a system
                 that integrates offline activities and interactions
                 captured in real time with online connections in a
                 conference environment, to provide a list of potential
                 people one should connect to for forming an ephemeral
                 social network. We investigate how social connections
                 can be established and integrated with physical
                 resources through positioning technology, and the
                 relationship between physical proximity encounters and
                 online social connections. Results from our two
                 datasets of two trials, one at the UIC/ATC 2010
                 conference and GCJK internal marketing event, show that
                 social connections that are reciprocal in relationship,
                 such as friendship and exchanged contacts, have
                 tighter, denser, and highly clustered networks compared
                 to unidirectional relationships such as follow. We
                 discover that there is a positive relationship between
                 physical proximity encounters and online social
                 connections before the social connection is made for
                 friends, but a negative relationship for after the
                 social connection is made. The first indicates social
                 selection is strong, and the second indicates social
                 influence is weak. Even though our dataset is sparse,
                 nonetheless we believe our work is promising and novel
                 which is worthy of future research.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "50",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Yang:2013:ISS,
  author =       "Shanchieh Jay Yang and Dana Nau and John Salerno",
  title =        "Introduction to the special section on social
                 computing, behavioral-cultural modeling, and
                 prediction",
  journal =      j-TIST,
  volume =       "4",
  number =       "3",
  pages =        "51:1--51:??",
  month =        jun,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2483669.2483684",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "51",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Hung:2013:OBI,
  author =       "Benjamin W. K. Hung and Stephan E. Kolitz and Asuman
                 Ozdaglar",
  title =        "Optimization-based influencing of village social
                 networks in a counterinsurgency",
  journal =      j-TIST,
  volume =       "4",
  number =       "3",
  pages =        "52:1--52:??",
  month =        jun,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2483669.2483685",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "This article considers the nonlethal targeting
                 assignment problem in the counterinsurgency in
                 Afghanistan, the problem of deciding on the people whom
                 U.S. forces should engage through outreach,
                 negotiations, meetings, and other interactions in order
                 to ultimately win the support of the population in
                 their area of operations. We propose two models: (1)
                 the Afghan counterinsurgency (COIN) social influence
                 model, to represent how attitudes of local leaders are
                 affected by repeated interactions with other local
                 leaders, insurgents, and counterinsurgents, and (2) the
                 nonlethal targeting model, a NonLinear Programming
                 (NLP) optimization formulation that identifies a
                 strategy for assigning k U.S. agents to produce the
                 greatest arithmetic mean of the expected long-term
                 attitude of the population. We demonstrate in an
                 experiment the merits of the optimization model in
                 nonlethal targeting, which performs significantly
                 better than both doctrine-based and random methods of
                 assignment in a large network.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "52",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Gintis:2013:MMS,
  author =       "Herbert Gintis",
  title =        "{Markov} models of social dynamics: Theory and
                 applications",
  journal =      j-TIST,
  volume =       "4",
  number =       "3",
  pages =        "53:1--53:??",
  month =        jun,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2483669.2483686",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "This article shows how agent-based models of social
                 dynamics can be treated rigorously and analytically as
                 finite Markov processes, and their long-run properties
                 are then given by an expanded version of the ergodic
                 theorem for Markov processes. A Markov process model of
                 a simplified market economy shows the fruitfulness of
                 this approach.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "53",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Fridman:2013:UQR,
  author =       "Natalie Fridman and Gal A. Kaminka",
  title =        "Using qualitative reasoning for social simulation of
                 crowds",
  journal =      j-TIST,
  volume =       "4",
  number =       "3",
  pages =        "54:1--54:??",
  month =        jun,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2483669.2483687",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The ability to model and reason about the potential
                 violence level of a demonstration is important to the
                 police decision making process. Unfortunately, existing
                 knowledge regarding demonstrations is composed of
                 partial qualitative descriptions without complete and
                 precise numerical information. In this article we
                 describe a first attempt to use qualitative reasoning
                 techniques to model demonstrations. To our knowledge,
                 such techniques have never been applied to modeling and
                 reasoning regarding crowd behaviors, nor in particular
                 demonstrations. We develop qualitative models
                 consistent with the partial, qualitative social science
                 literature, allowing us to model the interactions
                 between different factors that influence violence in
                 demonstrations. We then utilize qualitative simulation
                 to predict the potential eruption of violence, at
                 various levels, based on a description of the
                 demographics, environmental settings, and police
                 responses. We incrementally present and compare three
                 such qualitative models. The results show that while
                 two of these models fail to predict the outcomes of
                 real-world events reported and analyzed in the
                 literature, one model provides good results. We also
                 examine whether a popular machine learning algorithm
                 (decision tree learning) can be used. While the results
                 show that the decision trees provide improved
                 predictions, we show that the QR models can be more
                 sensitive to changes, and can account for what-if
                 scenarios, in contrast to decision trees. Moreover, we
                 introduce a novel analysis algorithm that analyzes the
                 QR simulations, to automatically determine the factors
                 that are most important in influencing the outcome in
                 specific real-world demonstrations. We show that the
                 algorithm identifies factors that correspond to
                 experts' analysis of these events.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "54",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Saito:2013:DCI,
  author =       "Kazumi Saito and Masahiro Kimura and Kouzou Ohara and
                 Hiroshi Motoda",
  title =        "Detecting changes in information diffusion patterns
                 over social networks",
  journal =      j-TIST,
  volume =       "4",
  number =       "3",
  pages =        "55:1--55:??",
  month =        jun,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2483669.2483688",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "We addressed the problem of detecting the change in
                 behavior of information diffusion over a social network
                 which is caused by an unknown external situation change
                 using a small amount of observation data in a
                 retrospective setting. The unknown change is assumed
                 effectively reflected in changes in the parameter
                 values in the probabilistic information diffusion
                 model, and the problem is reduced to detecting where in
                 time and how long this change persisted and how big
                 this change is. We solved this problem by searching the
                 change pattern that maximizes the likelihood of
                 generating the observed information diffusion
                 sequences, and in doing so we devised a very efficient
                 general iterative search algorithm using the derivative
                 of the likelihood which avoids parameter value
                 optimization during each search step. This is in
                 contrast to the naive learning algorithm in that it has
                 to iteratively update the pattern boundaries, each
                 requiring the parameter value optimization and thus is
                 very inefficient. We tested this algorithm for two
                 instances of the probabilistic information diffusion
                 model which has different characteristics. One is of
                 information push style and the other is of information
                 pull style. We chose Asynchronous Independent Cascade
                 (AsIC) model as the former and Value-weighted Voter
                 (VwV) model as the latter. The AsIC is the model for
                 general information diffusion with binary states and
                 the parameter to detect its change is diffusion
                 probability and the VwV is the model for opinion
                 formation with multiple states and the parameter to
                 detect its change is opinion value. The results tested
                 on these two models using four real-world network
                 structures confirm that the algorithm is robust enough
                 and can efficiently identify the correct change pattern
                 of the parameter values. Comparison with the naive
                 method that finds the best combination of change
                 boundaries by an exhaustive search through a set of
                 randomly selected boundary candidates shows that the
                 proposed algorithm far outperforms the naive method
                 both in terms of accuracy and computation time.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "55",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Marathe:2013:AFN,
  author =       "Achla Marathe and Zhengzheng Pan and Andrea Apolloni",
  title =        "Analysis of friendship network and its role in
                 explaining obesity",
  journal =      j-TIST,
  volume =       "4",
  number =       "3",
  pages =        "56:1--56:??",
  month =        jun,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2483669.2483689",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:09 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "We employ Add Health data to show that friendship
                 networks, constructed from mutual friendship
                 nominations, are important in building weight
                 perception, setting weight goals, and measuring social
                 marginalization among adolescents and young adults. We
                 study the relationship between individuals' perceived
                 weight status, actual weight status, weight status
                 relative to friends' weight status, and weight goals.
                 This analysis helps us understand how individual weight
                 perceptions might be formed, what these perceptions do
                 to the weight goals, and how friends' relative weight
                 affects weight perception and weight goals. Combining
                 this information with individuals' friendship network
                 helps determine the influence of social relationships
                 on weight-related variables. Multinomial logistic
                 regression results indicate that relative status is
                 indeed a significant predictor of perceived status, and
                 perceived status is a significant predictor of weight
                 goals. We also address the issue of causality between
                 actual weight status and social marginalization (as
                 measured by the number of friends) and show that
                 obesity precedes social marginalization in time rather
                 than the other way around. This lends credence to the
                 hypothesis that obesity leads to social marginalization
                 not vice versa. Attributes of the friendship network
                 can provide new insights into effective interventions
                 for combating obesity since adolescent friendships
                 provide an important social context for weight-related
                 behaviors.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "56",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Jiang:2013:MSB,
  author =       "Daxin Jiang and Jian Pei and Hang Li",
  title =        "Mining search and browse logs for {Web} search: a
                 survey",
  journal =      j-TIST,
  volume =       "4",
  number =       "4",
  pages =        "57:1--57:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2508037.2508038",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Huge amounts of search log data have been accumulated
                 at Web search engines. Currently, a popular Web search
                 engine may receive billions of queries and collect
                 terabytes of records about user search behavior daily.
                 Beside search log data, huge amounts of browse log data
                 have also been collected through client-side browser
                 plugins. Such massive amounts of search and browse log
                 data provide great opportunities for mining the wisdom
                 of crowds and improving Web search. At the same time,
                 designing effective and efficient methods to clean,
                 process, and model log data also presents great
                 challenges. In this survey, we focus on mining search
                 and browse log data for Web search. We start with an
                 introduction to search and browse log data and an
                 overview of frequently-used data summarizations in log
                 mining. We then elaborate how log mining applications
                 enhance the five major components of a search engine,
                 namely, query understanding, document understanding,
                 document ranking, user understanding, and monitoring
                 and feedback. For each aspect, we survey the major
                 tasks, fundamental principles, and state-of-the-art
                 methods.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "57",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Li:2013:SAM,
  author =       "Xi Li and Weiming Hu and Chunhua Shen and Zhongfei
                 Zhang and Anthony Dick and Anton {van den Hengel}",
  title =        "A survey of appearance models in visual object
                 tracking",
  journal =      j-TIST,
  volume =       "4",
  number =       "4",
  pages =        "58:1--58:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2508037.2508039",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Visual object tracking is a significant computer
                 vision task which can be applied to many domains, such
                 as visual surveillance, human computer interaction, and
                 video compression. Despite extensive research on this
                 topic, it still suffers from difficulties in handling
                 complex object appearance changes caused by factors
                 such as illumination variation, partial occlusion,
                 shape deformation, and camera motion. Therefore,
                 effective modeling of the 2D appearance of tracked
                 objects is a key issue for the success of a visual
                 tracker. In the literature, researchers have proposed a
                 variety of 2D appearance models. To help readers
                 swiftly learn the recent advances in 2D appearance
                 models for visual object tracking, we contribute this
                 survey, which provides a detailed review of the
                 existing 2D appearance models. In particular, this
                 survey takes a module-based architecture that enables
                 readers to easily grasp the key points of visual object
                 tracking. In this survey, we first decompose the
                 problem of appearance modeling into two different
                 processing stages: visual representation and
                 statistical modeling. Then, different 2D appearance
                 models are categorized and discussed with respect to
                 their composition modules. Finally, we address several
                 issues of interest as well as the remaining challenges
                 for future research on this topic. The contributions of
                 this survey are fourfold. First, we review the
                 literature of visual representations according to their
                 feature-construction mechanisms (i.e., local and
                 global). Second, the existing statistical modeling
                 schemes for tracking-by-detection are reviewed
                 according to their model-construction mechanisms:
                 generative, discriminative, and hybrid
                 generative-discriminative. Third, each type of visual
                 representations or statistical modeling techniques is
                 analyzed and discussed from a theoretical or practical
                 viewpoint. Fourth, the existing benchmark resources
                 (e.g., source codes and video datasets) are examined in
                 this survey.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "58",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Cena:2013:PSA,
  author =       "Federica Cena and Antonina Dattolo and Pasquale Lops
                 and Julita Vassileva",
  title =        "Perspectives in {Semantic Adaptive Social Web}",
  journal =      j-TIST,
  volume =       "4",
  number =       "4",
  pages =        "59:1--59:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2501603",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The Social Web is now a successful reality with its
                 quickly growing number of users and applications. Also
                 the Semantic Web, which started with the objective of
                 describing Web resources in a machine-processable way,
                 is now outgrowing the research labs and is being
                 massively exploited in many websites, incorporating
                 high-quality user-generated content and semantic
                 annotations. The primary goal of this special section
                 is to showcase some recent research at the intersection
                 of the Social Web and the Semantic Web that explores
                 the benefits that adaptation and personalization have
                 to offer in the Web of the future, the so-called Social
                 Adaptive Semantic Web. We have selected two articles
                 out of fourteen submissions based on the quality of the
                 articles and we present the main lessons learned from
                 the overall analysis of these submissions.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "59",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Biancalana:2013:SSQ,
  author =       "Claudio Biancalana and Fabio Gasparetti and Alessandro
                 Micarelli and Giuseppe Sansonetti",
  title =        "Social semantic query expansion",
  journal =      j-TIST,
  volume =       "4",
  number =       "4",
  pages =        "60:1--60:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2508037.2508041",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Weak semantic techniques rely on the integration of
                 Semantic Web techniques with social annotations and aim
                 to embrace the strengths of both. In this article, we
                 propose a novel weak semantic technique for query
                 expansion. Traditional query expansion techniques are
                 based on the computation of two-dimensional
                 co-occurrence matrices. Our approach proposes the use
                 of three-dimensional matrices, where the added
                 dimension is represented by semantic classes (i.e.,
                 categories comprising all the terms that share a
                 semantic property) related to the folksonomy extracted
                 from social bookmarking services, such as delicious and
                 StumbleUpon. The results of an in-depth experimental
                 evaluation performed on both artificial datasets and
                 real users show that our approach outperforms
                 traditional techniques, such as relevance feedback and
                 personalized PageRank, so confirming the validity and
                 usefulness of the categorization of the user needs and
                 preferences in semantic classes. We also present the
                 results of a questionnaire aimed to know the users
                 opinion regarding the system. As one drawback of
                 several query expansion techniques is their high
                 computational costs, we also provide a complexity
                 analysis of our system, in order to show its capability
                 of operating in real time.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "60",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Chen:2013:WMS,
  author =       "Chao Chen and Qiusha Zhu and Lin Lin and Mei-Ling
                 Shyu",
  title =        "{Web} media semantic concept retrieval via tag removal
                 and model fusion",
  journal =      j-TIST,
  volume =       "4",
  number =       "4",
  pages =        "61:1--61:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2508037.2508042",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Multimedia data on social websites contain rich
                 semantics and are often accompanied with user-defined
                 tags. To enhance Web media semantic concept retrieval,
                 the fusion of tag-based and content-based models can be
                 used, though it is very challenging. In this article, a
                 novel semantic concept retrieval framework that
                 incorporates tag removal and model fusion is proposed
                 to tackle such a challenge. Tags with useful
                 information can facilitate media search, but they are
                 often imprecise, which makes it important to apply
                 noisy tag removal (by deleting uncorrelated tags) to
                 improve the performance of semantic concept retrieval.
                 Therefore, a multiple correspondence analysis
                 (MCA)-based tag removal algorithm is proposed, which
                 utilizes MCA's ability to capture the relationships
                 among nominal features and identify representative and
                 discriminative tags holding strong correlations with
                 the target semantic concepts. To further improve the
                 retrieval performance, a novel model fusion method is
                 also proposed to combine ranking scores from both
                 tag-based and content-based models, where the
                 adjustment of ranking scores, the reliability of
                 models, and the correlations between the intervals
                 divided on the ranking scores and the semantic concepts
                 are all considered. Comparative results with extensive
                 experiments on the NUS-WIDE-LITE as well as the
                 NUS-WIDE-270K benchmark datasets with 81 semantic
                 concepts show that the proposed framework outperforms
                 baseline results and the other comparison methods with
                 each component being evaluated separately.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "61",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Reddy:2013:ISS,
  author =       "Chandan K. Reddy and Christopher C. Yang",
  title =        "Introduction to the special section on intelligent
                 systems for health informatics",
  journal =      j-TIST,
  volume =       "4",
  number =       "4",
  pages =        "62:1--62:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2508037.2508043",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "62",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Batal:2013:TPM,
  author =       "Iyad Batal and Hamed Valizadegan and Gregory F. Cooper
                 and Milos Hauskrecht",
  title =        "A temporal pattern mining approach for classifying
                 electronic health record data",
  journal =      j-TIST,
  volume =       "4",
  number =       "4",
  pages =        "63:1--63:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2508037.2508044",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "We study the problem of learning classification models
                 from complex multivariate temporal data encountered in
                 electronic health record systems. The challenge is to
                 define a good set of features that are able to
                 represent well the temporal aspect of the data. Our
                 method relies on temporal abstractions and temporal
                 pattern mining to extract the classification features.
                 Temporal pattern mining usually returns a large number
                 of temporal patterns, most of which may be irrelevant
                 to the classification task. To address this problem, we
                 present the Minimal Predictive Temporal Patterns
                 framework to generate a small set of predictive and
                 nonspurious patterns. We apply our approach to the
                 real-world clinical task of predicting patients who are
                 at risk of developing heparin-induced thrombocytopenia.
                 The results demonstrate the benefit of our approach in
                 efficiently learning accurate classifiers, which is a
                 key step for developing intelligent clinical monitoring
                 systems.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "63",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Rashidi:2013:CMM,
  author =       "Parisa Rashidi and Diane J. Cook",
  title =        "{COM}: a method for mining and monitoring human
                 activity patterns in home-based health monitoring
                 systems",
  journal =      j-TIST,
  volume =       "4",
  number =       "4",
  pages =        "64:1--64:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2508037.2508045",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The increasing aging population in the coming decades
                 will result in many complications for society and in
                 particular for the healthcare system due to the
                 shortage of healthcare professionals and healthcare
                 facilities. To remedy this problem, researchers have
                 pursued developing remote monitoring systems and
                 assisted living technologies by utilizing recent
                 advances in sensor and networking technology, as well
                 as in the data mining and machine learning fields. In
                 this article, we report on our fully automated approach
                 for discovering and monitoring patterns of daily
                 activities. Discovering and tracking patterns of daily
                 activities can provide unprecedented opportunities for
                 health monitoring and assisted living applications,
                 especially for older adults and individuals with mental
                 disabilities. Previous approaches usually rely on
                 preselected activities or labeled data to track and
                 monitor daily activities. In this article, we present a
                 fully automated approach by discovering natural
                 activity patterns and their variations in real-life
                 data. We will show how our activity discovery component
                 can be integrated with an activity recognition
                 component to track and monitor various daily activity
                 patterns. We also provide an activity visualization
                 component to allow caregivers to visually observe and
                 examine the activity patterns using a user-friendly
                 interface. We validate our algorithms using real-life
                 data obtained from two apartments during a three-month
                 period.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "64",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Wolf:2013:DUR,
  author =       "Hannes Wolf and Klaus Herrmann and Kurt Rothermel",
  title =        "Dealing with uncertainty: Robust workflow navigation
                 in the healthcare domain",
  journal =      j-TIST,
  volume =       "4",
  number =       "4",
  pages =        "65:1--65:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2508037.2508046",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Processes in the healthcare domain are characterized
                 by coarsely predefined recurring procedures that are
                 flexibly adapted by the personnel to suit specific
                 situations. In this setting, a workflow management
                 system that gives guidance and documents the
                 personnel's actions can lead to a higher quality of
                 care, fewer mistakes, and higher efficiency. However,
                 most existing workflow management systems enforce rigid
                 inflexible workflows and rely on direct manual input.
                 Both are inadequate for healthcare processes. In
                 particular, direct manual input is not possible in most
                 cases since (1) it would distract the personnel even in
                 critical situations and (2) it would violate
                 fundamental hygiene principles by requiring disinfected
                 doctors and nurses to touch input devices. The solution
                 could be activity recognition systems that use sensor
                 data (e.g., audio and acceleration data) to infer the
                 current activities by the personnel and provide input
                 to a workflow (e.g., informing it that a certain
                 activity is finished now). However, state-of-the-art
                 activity recognition technologies have difficulties in
                 providing reliable information. We describe a
                 comprehensive framework tailored for flexible
                 human-centric healthcare processes that improves the
                 reliability of activity recognition data. We present a
                 set of mechanisms that exploit the application
                 knowledge encoded in workflows in order to reduce the
                 uncertainty of this data, thus enabling unobtrusive
                 robust healthcare workflows. We evaluate our work based
                 on a real-world case study and show that the robustness
                 of unobtrusive healthcare workflows can be increased to
                 an absolute value of up to 91\% (compared to only 12\%
                 with a classical workflow system). This is a major
                 breakthrough that paves the way towards future
                 IT-enabled healthcare systems.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "65",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Park:2013:CPC,
  author =       "Yubin Park and Joydeep Ghosh",
  title =        "{CUDIA}: Probabilistic cross-level imputation using
                 individual auxiliary information",
  journal =      j-TIST,
  volume =       "4",
  number =       "4",
  pages =        "66:1--66:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2508037.2508047",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In healthcare-related studies, individual patient or
                 hospital data are not often publicly available due to
                 privacy restrictions, legal issues, or reporting norms.
                 However, such measures may be provided at a higher or
                 more aggregated level, such as state-level,
                 county-level summaries or averages over health zones,
                 such as hospital referral regions (HRR) or hospital
                 service areas (HSA). Such levels constitute partitions
                 over the underlying individual level data, which may
                 not match the groupings that would have been obtained
                 if one clustered the data based on individual-level
                 attributes. Moreover, treating aggregated values as
                 representatives for the individuals can result in the
                 ecological fallacy. How can one run data mining
                 procedures on such data where different variables are
                 available at different levels of aggregation or
                 granularity? In this article, we seek a better
                 utilization of variably aggregated datasets, which are
                 possibly assembled from different sources. We propose a
                 novel cross-level imputation technique that models the
                 generative process of such datasets using a Bayesian
                 directed graphical model. The imputation is based on
                 the underlying data distribution and is shown to be
                 unbiased. This imputation can be further utilized in a
                 subsequent predictive modeling, yielding improved
                 accuracies. The experimental results using a simulated
                 dataset and the Behavioral Risk Factor Surveillance
                 System (BRFSS) dataset are provided to illustrate the
                 generality and capabilities of the proposed
                 framework.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "66",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Hoens:2013:RMR,
  author =       "T. Ryan Hoens and Marina Blanton and Aaron Steele and
                 Nitesh V. Chawla",
  title =        "Reliable medical recommendation systems with patient
                 privacy",
  journal =      j-TIST,
  volume =       "4",
  number =       "4",
  pages =        "67:1--67:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2508037.2508048",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "One of the concerns patients have when confronted with
                 a medical condition is which physician to trust. Any
                 recommendation system that seeks to answer this
                 question must ensure that any sensitive medical
                 information collected by the system is properly
                 secured. In this article, we codify these privacy
                 concerns in a privacy-friendly framework and present
                 two architectures that realize it: the Secure
                 Processing Architecture (SPA) and the Anonymous
                 Contributions Architecture (ACA). In SPA, patients
                 submit their ratings in a protected form without
                 revealing any information about their data and the
                 computation of recommendations proceeds over the
                 protected data using secure multiparty computation
                 techniques. In ACA, patients submit their ratings in
                 the clear, but no link between a submission and patient
                 data can be made. We discuss various aspects of both
                 architectures, including techniques for ensuring
                 reliability of computed recommendations and system
                 performance, and provide their comparison.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "67",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Khan:2013:VOM,
  author =       "Atif Khan and John A. Doucette and Robin Cohen",
  title =        "Validation of an ontological medical decision support
                 system for patient treatment using a repository of
                 patient data: Insights into the value of machine
                 learning",
  journal =      j-TIST,
  volume =       "4",
  number =       "4",
  pages =        "68:1--68:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2508037.2508049",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In this article, we begin by presenting OMeD, a
                 medical decision support system, and argue for its
                 value over purely probabilistic approaches that reason
                 about patients for time-critical decision scenarios. We
                 then progress to present Holmes, a Hybrid Ontological
                 and Learning MEdical System which supports decision
                 making about patient treatment. This system is
                 introduced in order to cope with the case of missing
                 data. We demonstrate its effectiveness by operating on
                 an extensive set of real-world patient health data from
                 the CDC, applied to the decision-making scenario of
                 administering sleeping pills. In particular, we clarify
                 how the combination of semantic, ontological
                 representations, and probabilistic reasoning together
                 enable the proposal of effective patient treatments.
                 Our focus is thus on presenting an approach for
                 interpreting medical data in the context of real-time
                 decision making. This constitutes a comprehensive
                 framework for the design of medical recommendation
                 systems for potential use by medical professionals and
                 patients both, with the end result being personalized
                 patient treatment. We conclude with a discussion of the
                 value of our particular approach for such diverse
                 considerations as coping with misinformation provided
                 by patients, performing effectively in time-critical
                 environments where real-time decisions are necessary,
                 and potential applications facilitating patient
                 information gathering.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "68",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Lee:2013:CPR,
  author =       "Suk Jin Lee and Yuichi Motai and Elisabeth Weiss and
                 Shumei S. Sun",
  title =        "Customized prediction of respiratory motion with
                 clustering from multiple patient interaction",
  journal =      j-TIST,
  volume =       "4",
  number =       "4",
  pages =        "69:1--69:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2508037.2508050",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Information processing of radiotherapy systems has
                 become an important research area for sophisticated
                 radiation treatment methodology. Geometrically precise
                 delivery of radiotherapy in the thorax and upper
                 abdomen is compromised by respiratory motion during
                 treatment. Accurate prediction of the respiratory
                 motion would be beneficial for improving tumor
                 targeting. However, a wide variety of breathing
                 patterns can make it difficult to predict the breathing
                 motion with explicit models. We proposed a respiratory
                 motion predictor, that is, customized prediction with
                 multiple patient interactions using neural network
                 (CNN). For the preprocedure of prediction for
                 individual patient, we construct the clustering based
                 on breathing patterns of multiple patients using the
                 feature selection metrics that are composed of a
                 variety of breathing features. In the intraprocedure,
                 the proposed CNN used neural networks (NN) for a part
                 of the prediction and the extended Kalman filter (EKF)
                 for a part of the correction. The prediction accuracy
                 of the proposed method was investigated with a variety
                 of prediction time horizons using normalized root mean
                 squared error (NRMSE) values in comparison with the
                 alternate recurrent neural network (RNN). We have also
                 evaluated the prediction accuracy using the marginal
                 value that can be used as the reference value to judge
                 how many signals lie outside the confidence level. The
                 experimental results showed that the proposed CNN can
                 outperform RNN with respect to the prediction accuracy
                 with an improvement of 50\%.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "69",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Baralis:2013:EPH,
  author =       "Elena Baralis and Tania Cerquitelli and Silvia
                 Chiusano and Vincenzo D'Elia and Riccardo Molinari and
                 Davide Susta",
  title =        "Early prediction of the highest workload in
                 incremental cardiopulmonary tests",
  journal =      j-TIST,
  volume =       "4",
  number =       "4",
  pages =        "70:1--70:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2508037.2508051",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Incremental tests are widely used in cardiopulmonary
                 exercise testing, both in the clinical domain and in
                 sport sciences. The highest workload (denoted
                 W$_{peak}$) reached in the test is key information for
                 assessing the individual body response to the test and
                 for analyzing possible cardiac failures and planning
                 rehabilitation, and training sessions. Being physically
                 very demanding, incremental tests can significantly
                 increase the body stress on monitored individuals and
                 may cause cardiopulmonary overload. This article
                 presents a new approach to cardiopulmonary testing that
                 addresses these drawbacks. During the test, our
                 approach analyzes the individual body response to the
                 exercise and predicts the W$_{peak}$ value that will be
                 reached in the test and an evaluation of its accuracy.
                 When the accuracy of the prediction becomes
                 satisfactory, the test can be prematurely stopped, thus
                 avoiding its entire execution. To predict W$_{peak}$,
                 we introduce a new index, the CardioPulmonary
                 Efficiency Index (CPE), summarizing the cardiopulmonary
                 response of the individual to the test. Our approach
                 analyzes the CPE trend during the test, together with
                 the characteristics of the individual, and predicts
                 W$_{peak}$. A K-nearest-neighbor-based classifier and
                 an ANN-based classifier are exploited for the prediction.
                 The experimental evaluation showed that the W$_{peak}$
                 value can be predicted with a limited error from the
                 first steps of the test.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "70",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Lee:2013:SFI,
  author =       "Yugyung Lee and Saranya Krishnamoorthy and Deendayal
                 Dinakarpandian",
  title =        "A semantic framework for intelligent matchmaking for
                 clinical trial eligibility criteria",
  journal =      j-TIST,
  volume =       "4",
  number =       "4",
  pages =        "71:1--71:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2508037.2508052",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "An integral step in the discovery of new treatments
                 for medical conditions is the matching of potential
                 subjects with appropriate clinical trials. Eligibility
                 criteria for clinical trials are typically specified as
                 inclusion and exclusion criteria for each study in
                 free-text form. While this is sufficient for a human to
                 guide a recruitment interview, it cannot be reliably
                 and computationally construed to identify potential
                 subjects. Standardization of the representation of
                 eligibility criteria can enhance the efficiency and
                 accuracy of this process. This article presents a
                 semantic framework that facilitates intelligent
                 matchmaking by identifying a minimal set of eligibility
                 criteria with maximal coverage of clinical trials. In
                 contrast to existing top-down manual standardization
                 efforts, a bottom-up data driven approach is presented
                 to find a canonical nonredundant representation of an
                 arbitrary collection of clinical trial criteria. The
                 methodology has been validated with a corpus of 709
                 clinical trials related to Generalized Anxiety Disorder
                 containing 2,760 inclusion and 4,871 exclusion
                 eligibility criteria. This corpus is well represented
                 by a relatively small number of 126 inclusion clusters
                 and 175 exclusion clusters, each of which corresponds
                 to a semantically distinct criterion. Internal and
                 external validation measures provide an objective
                 evaluation of the method. An eligibility criteria
                 ontology has been constructed based on the clustering.
                 The resulting model has been incorporated into the
                 development of the MindTrial clinical trial recruiting
                 system. The prototype for clinical trial recruitment
                 illustrates the effectiveness of the methodology in
                 characterizing clinical trials and subjects and
                 accurate matching between them.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "71",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Bi:2013:MLA,
  author =       "Jinbo Bi and Jiangwen Sun and Yu Wu and Howard Tennen
                 and Stephen Armeli",
  title =        "A machine learning approach to college drinking
                 prediction and risk factor identification",
  journal =      j-TIST,
  volume =       "4",
  number =       "4",
  pages =        "72:1--72:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2508037.2508053",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Alcohol misuse is one of the most serious public
                 health problems facing adolescents and young adults in
                 the United States. National statistics shows that
                 nearly 90\% of alcohol consumed by youth under 21 years
                 of age involves binge drinking and 44\% of college
                 students engage in high-risk drinking activities.
                 Conventional alcohol intervention programs, which aim
                 at installing either an alcohol reduction norm or
                 prohibition against underage drinking, have yielded
                 little progress in controlling college binge drinking
                 over the years. Existing alcohol studies are deductive
                 where data are collected to investigate a
                 psychological/behavioral hypothesis, and statistical
                 analysis is applied to the data to confirm the
                 hypothesis. Due to this confirmatory manner of
                 analysis, the resulting statistical models are
                 cohort-specific and typically fail to replicate on a
                 different sample. This article presents two machine
                 learning approaches for a secondary analysis of
                 longitudinal data collected in college alcohol studies
                 sponsored by the National Institute on Alcohol Abuse
                 and Alcoholism. Our approach aims to discover
                 knowledge, from multiwave cohort-sequential daily data,
                 which may or may not align with the original hypothesis
                 but quantifies predictive models with higher likelihood
                 to generalize to new samples. We first propose a
                 so-called temporally-correlated support vector machine
                 to construct a classifier as a function of daily moods,
                 stress, and drinking expectancies to distinguish days
                 with nighttime binge drinking from days without for
                 individual students. We then propose a combination of
                 cluster analysis and feature selection, where cluster
                 analysis is used to identify drinking patterns based on
                 averaged daily drinking behavior and feature selection
                 is used to identify risk factors associated with each
                 pattern. We evaluate our methods on two cohorts of 530
                 total college students recruited during the Spring and
                 Fall semesters, respectively. Cross validation on these
                 two cohorts and further on 100 random partitions of the
                 total students demonstrate that our methods improve the
                 model generalizability in comparison with traditional
                 multilevel logistic regression. The discovered risk
                 factors and the interaction of these factors delineated
                 in our models can set a potential basis and offer
                 insights to a new design of more effective college
                 alcohol interventions.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "72",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Subbu:2013:LMF,
  author =       "Kalyan Pathapati Subbu and Brandon Gozick and Ram
                 Dantu",
  title =        "{LocateMe}: Magnetic-fields-based indoor localization
                 using smartphones",
  journal =      j-TIST,
  volume =       "4",
  number =       "4",
  pages =        "73:1--73:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2508037.2508054",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Fine-grained localization is extremely important to
                 accurately locate a user indoors. Although innovative
                 solutions have already been proposed, there is no
                 solution that is universally accepted, easily
                 implemented, user centric, and, most importantly, works
                 in the absence of GSM coverage or WiFi availability.
                 The advent of sensor rich smartphones has paved a way
                 to develop a solution that can cater to these
                 requirements. By employing a smartphone's built-in
                 magnetic field sensor, magnetic signatures were
                 collected inside buildings. These signatures displayed
                 a uniqueness in their patterns due to the presence of
                 different kinds of pillars, doors, elevators, etc.,
                 that consist of ferromagnetic materials like steel or
                 iron. We theoretically analyze the cause of this
                 uniqueness and then present an indoor localization
                 solution by classifying signatures based on their
                 patterns. However, to account for user walking speed
                 variations so as to provide an application usable to a
                 variety of users, we follow a dynamic
                 time-warping-based approach that is known to work on
                 similar signals irrespective of their variations in the
                 time axis. Our approach resulted in localization
                 distances of approximately 2m--6m with accuracies
                 between 80--100\% implying that it is sufficient to
                 walk short distances across hallways to be located by
                 the smartphone. The implementation of the application
                 on different smartphones yielded response times of less
                 than five secs, thereby validating the feasibility of
                 our approach and making it a viable solution.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "73",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Chen:2013:RWM,
  author =       "Bin Chen and Jian Su and Chew Lim Tan",
  title =        "Random walks down the mention graphs for event
                 coreference resolution",
  journal =      j-TIST,
  volume =       "4",
  number =       "4",
  pages =        "74:1--74:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2508037.2508055",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Event coreference is an important task in event
                 extraction and other natural language processing tasks.
                 Despite its importance, it was merely discussed in
                 previous studies. In this article, we present a global
                 coreference resolution system dedicated to various
                 sophisticated event coreference phenomena. First, seven
                 resolvers are utilized to resolve different event and
                 object coreference mention pairs with a new instance
                 selection strategy and new linguistic features. Second,
                 a global solution --- a modified random walk
                 partitioning --- is employed for the chain formation. Being
                 the first attempt to apply the random walk model for
                 coreference resolution, the revised model utilizes a
                 sampling method, termination criterion, and stopping
                 probability to greatly improve the effectiveness of
                 random walk model for event coreference resolution.
                 Last but not least, the new model facilitates a
                 convenient way to incorporate sophisticated linguistic
                 constraints and preferences, the related object mention
                 graph, as well as pronoun coreference information not
                 used in previous studies for effective chain formation.
                 In total, these techniques impose more than 20\%
                 F-score improvement over the baseline system.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "74",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Editors:2013:ISS,
  author =       "Editors",
  title =        "Introduction to special section on intelligent mobile
                 knowledge discovery and management systems",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "1:1--1:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542183",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "1",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Ying:2013:MGT,
  author =       "Josh Jia-Ching Ying and Wang-Chien Lee and Vincent S.
                 Tseng",
  title =        "Mining geographic-temporal-semantic patterns in
                 trajectories for location prediction",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "2:1--2:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542184",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In recent years, research on location predictions by
                 mining trajectories of users has attracted a lot of
                 attention. Existing studies on this topic mostly treat
                 such predictions as just a type of location
                 recommendation, that is, they predict the next location
                 of a user using location recommenders. However, a user
                 usually visits somewhere for reasons other than
                 interestingness. In this article, we propose a novel
                 mining-based location prediction approach called
                 Geographic-Temporal-Semantic-based Location Prediction
                 (GTS-LP), which takes into account a user's
                 geographic-triggered intentions, temporal-triggered
                 intentions, and semantic-triggered intentions, to
                 estimate the probability of the user in visiting a
                 location. The core idea underlying our proposal is the
                 discovery of trajectory patterns of users, namely GTS
                 patterns, to capture frequent movements triggered by
                 the three kinds of intentions. To achieve this goal, we
                 define a new trajectory pattern to capture the key
                 properties of the behaviors that are motivated by the
                 three kinds of intentions from trajectories of users.
                 In our GTS-LP approach, we propose a series of novel
                 matching strategies to calculate the similarity between
                 the current movement of a user and discovered GTS
                 patterns based on various moving intentions. On the
                 basis of similitude, we make an online prediction as to
                 the location the user intends to visit. To the best of
                 our knowledge, this is the first work on location
                 prediction based on trajectory pattern mining that
                 explores the geographic, temporal, and semantic
                 properties simultaneously. By means of a comprehensive
                 evaluation using various real trajectory datasets, we
                 show that our proposed GTS-LP approach delivers
                 excellent performance and significantly outperforms
                 existing state-of-the-art location prediction
                 methods.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "2",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Tang:2013:FTC,
  author =       "Lu-An Tang and Yu Zheng and Jing Yuan and Jiawei Han
                 and Alice Leung and Wen-Chih Peng and Thomas {La
                 Porta}",
  title =        "A framework of traveling companion discovery on
                 trajectory data streams",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "3:1--3:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542185",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The advance of mobile technologies leads to huge
                 volumes of spatio-temporal data collected in the form
                 of trajectory data streams. In this study, we
                 investigate the problem of discovering object groups
                 that travel together (i.e., traveling companions) from
                 trajectory data streams. Such technique has broad
                 applications in the areas of scientific study,
                 transportation management, and military surveillance.
                 To discover traveling companions, the monitoring system
                 should cluster the objects of each snapshot and
                 intersect the clustering results to retrieve
                 moving-together objects. Since both clustering and
                 intersection steps involve high computational overhead,
                 the key issue of companion discovery is to improve the
                 efficiency of algorithms. We propose the models of
                 closed companion candidates and smart intersection to
                 accelerate data processing. A data structure termed
                 traveling buddy is designed to facilitate scalable and
                 flexible companion discovery from trajectory streams.
                 The traveling buddies are microgroups of objects that
                 are tightly bound together. By only storing the object
                 relationships rather than their spatial coordinates,
                 the buddies can be dynamically maintained along the
                 trajectory stream with low cost. Based on traveling
                 buddies, the system can discover companions without
                 accessing the object details. In addition, we extend
                 the proposed framework to discover companions on more
                 complicated scenarios with spatial and temporal
                 constraints, such as on the road network and
                 battlefield. The proposed methods are evaluated with
                 extensive experiments on both real and synthetic
                 datasets. Experimental results show that our proposed
                 buddy-based approach is an order of magnitude faster
                 than the baselines and achieves higher accuracy in
                 companion discovery.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "3",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Doo:2013:MTF,
  author =       "Myungcheol Doo and Ling Liu",
  title =        "{Mondrian} tree: a fast index for spatial alarm
                 processing",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "4:1--4:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542186",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "With ubiquitous wireless connectivity and
                 technological advances in mobile devices, we witness
                 the growing demands and increasing market shares of
                 mobile intelligent systems and technologies for
                 real-time decision making and location-based knowledge
                 discovery. Spatial alarms are considered as one of the
                 fundamental capabilities for intelligent mobile
                 location-based systems. Like time-based alarms that
                 remind us the arrival of a future time point, spatial
                 alarms remind us the arrival of a future spatial point.
                 Existing approaches for scaling spatial alarm
                 processing are focused on computing Alarm-Free Regions
                 (Afr) and Alarm-Free Period (Afp) such that mobile
                 objects traveling within an Afr can safely hibernate
                 the alarm evaluation process for the computed Afp, to
                 save battery power, until approaching the nearest alarm
                 of interest. A key technical challenge in scaling
                 spatial alarm processing is to efficiently compute Afr
                 and Afp such that mobile objects traveling within an
                 Afr can safely hibernate the alarm evaluation process
                 during the computed Afp, while maintaining high
                 accuracy. In this article we argue that on-demand
                 computation of Afr is expensive and may not scale well
                 for dense populations of mobile objects. Instead, we
                 propose to maintain an index for both spatial alarms
                 and empty regions (Afr) such that for a given mobile
                 user's location, we can find relevant spatial alarms
                 and whether it is in an alarm-free region more
                 efficiently. We also show that conventional spatial
                 indexing methods, such as R-tree family, k-d tree,
                 Quadtree, and Grid, are by design not well suited to
                 index empty regions. We present Mondrian Tree --- a
                 region partitioning tree for indexing both spatial
                 alarms and alarm-free regions. We first introduce the
                 Mondrian Tree indexing algorithms, including index
                 construction, search, and maintenance. Then we describe
                 a suite of Mondrian Tree optimizations to further
                 enhance the performance of spatial alarm processing.
                 Our experimental evaluation shows that the Mondrian
                 Tree index, as an intelligent technology for mobile
                 systems, outperforms traditional index methods, such as
                 R-tree, Quadtree, and k-d tree, for spatial alarm
                 processing.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "4",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Bonchi:2013:ISI,
  author =       "Francesco Bonchi and Wray Buntine and Ricard
                 Gavald{\'a} and Shengbo Guo",
  title =        "Introduction to the special issue on {Social Web}
                 mining",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "5:1--5:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542187",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "5",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{He:2013:DJS,
  author =       "Yulan He and Chenghua Lin and Wei Gao and Kam-Fai
                 Wong",
  title =        "Dynamic joint sentiment-topic model",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "6:1--6:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542188",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Social media data are produced continuously by a large
                 and uncontrolled number of users. The dynamic nature of
                 such data requires the sentiment and topic analysis
                 model to be also dynamically updated, capturing the
                 most recent language use of sentiments and topics in
                 text. We propose a dynamic Joint Sentiment-Topic model
                 (dJST) which allows the detection and tracking of views
                 of current and recurrent interests and shifts in topic
                 and sentiment. Both topic and sentiment dynamics are
                 captured by assuming that the current
                 sentiment-topic-specific word distributions are
                 generated according to the word distributions at
                 previous epochs. We study three different ways of
                 accounting for such dependency information: (1) sliding
                 window where the current sentiment-topic word
                 distributions are dependent on the previous
                 sentiment-topic-specific word distributions in the last
                 S epochs; (2) skip model where history sentiment topic
                 word distributions are considered by skipping some
                 epochs in between; and (3) multiscale model where
                 previous long- and short-timescale distributions are
                 taken into consideration. We derive efficient online
                 inference procedures to sequentially update the model
                 with newly arrived data and show the effectiveness of
                 our proposed model on the Mozilla add-on reviews
                 crawled between 2007 and 2011.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "6",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Cataldi:2013:PET,
  author =       "Mario Cataldi and Luigi {Di Caro} and Claudio
                 Schifanella",
  title =        "Personalized emerging topic detection based on a term
                 aging model",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "7:1--7:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542189",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Twitter is a popular microblogging service that acts
                 as a ground-level information news flashes portal where
                 people with different background, age, and social
                 condition provide information about what is happening
                 in front of their eyes. This characteristic makes
                 Twitter probably the fastest information service in the
                 world. In this article, we recognize this role of
                 Twitter and propose a novel, user-aware topic detection
                 technique that permits to retrieve, in real time, the
                 most emerging topics of discussion expressed by the
                 community within the interests of specific users.
                 First, we analyze the topology of Twitter looking at
                 how the information spreads over the network, taking
                 into account the authority/influence of each active
                 user. Then, we make use of a novel term aging model to
                 compute the burstiness of each term, and provide a
                 graph-based method to retrieve the minimal set of terms
                 that can represent the corresponding topic. Finally,
                 since any user can have topic preferences inferable
                 from the shared content, we leverage such knowledge to
                 highlight the most emerging topics within her foci of
                 interest. As evaluation we then provide several
                 experiments together with a user study proving the
                 validity and reliability of the proposed approach.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "7",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Arias:2013:FTD,
  author =       "Marta Arias and Argimiro Arratia and Ramon Xuriguera",
  title =        "Forecasting with {Twitter} data",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "8:1--8:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542190",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The dramatic rise in the use of social network
                 platforms such as Facebook or Twitter has resulted in
                 the availability of vast and growing user-contributed
                 repositories of data. Exploiting this data by
                 extracting useful information from it has become a
                 great challenge in data mining and knowledge discovery.
                 A recently popular way of extracting useful information
                 from social network platforms is to build indicators,
                 often in the form of a time series, of general public
                 mood by means of sentiment analysis. Such indicators
                 have been shown to correlate with a diverse variety of
                 phenomena. In this article we follow this line of work
                 and set out to assess, in a rigorous manner, whether a
                 public sentiment indicator extracted from daily Twitter
                 messages can indeed improve the forecasting of social,
                 economic, or commercial indicators. To this end we have
                 collected and processed a large amount of Twitter posts
                 from March 2011 to the present date for two very
                 different domains: stock market and movie box office
                 revenue. For each of these domains, we build and
                 evaluate forecasting models for several target time
                 series both using and ignoring the Twitter-related
                 data. If Twitter does help, then this should be
                 reflected in the fact that the predictions of models
                 that use Twitter-related data are better than the
                 models that do not use this data. By systematically
                 varying the models that we use and their parameters,
                 together with other tuning factors such as lag or the
                 way in which we build our Twitter sentiment index, we
                 obtain a large dataset that allows us to test our
                 hypothesis under different experimental conditions.
                 Using a novel decision-tree-based technique that we
                 call summary tree we are able to mine this large
                 dataset and obtain automatically those configurations
                 that lead to an improvement in the prediction power of
                 our forecasting models. As a general result, we have
                 seen that nonlinear models do take advantage of Twitter
                 data when forecasting trends in volatility indices,
                 while linear ones fail systematically when forecasting
                 any kind of financial time series. In the case of
                 predicting box office revenue trend, it is support
                 vector machines that make best use of Twitter data. In
                 addition, we conduct statistical tests to determine the
                 relation between our Twitter time series and the
                 different target time series.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "8",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Lee:2013:CES,
  author =       "Kyumin Lee and James Caverlee and Zhiyuan Cheng and
                 Daniel Z. Sui",
  title =        "Campaign extraction from social media",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "9:1--9:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542191",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In this manuscript, we study the problem of detecting
                 coordinated free text campaigns in large-scale social
                 media. These campaigns---ranging from coordinated spam
                 messages to promotional and advertising campaigns to
                 political astro-turfing---are growing in significance and
                 reach with the commensurate rise in massive-scale
                 social systems. Specifically, we propose and evaluate a
                 content-driven framework for effectively linking free
                 text posts with common ``talking points'' and
                 extracting campaigns from large-scale social media.
                 Three of the salient features of the campaign
                 extraction framework are: (i) first, we investigate
                 graph mining techniques for isolating coherent
                 campaigns from large message-based graphs; (ii) second,
                 we conduct a comprehensive comparative study of
                 text-based message correlation in message and user
                 levels; and (iii) finally, we analyze temporal
                 behaviors of various campaign types. Through an
                 experimental study over millions of Twitter messages we
                 identify five major types of campaigns---namely Spam,
                 Promotion, Template, News, and Celebrity campaigns---and
                 we show how these campaigns may be extracted with high
                 precision and recall.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "9",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Fire:2013:CEL,
  author =       "Michael Fire and Lena Tenenboim-Chekina and Rami Puzis
                 and Ofrit Lesser and Lior Rokach and Yuval Elovici",
  title =        "Computationally efficient link prediction in a variety
                 of social networks",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "10:1--10:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542192",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Online social networking sites have become
                 increasingly popular over the last few years. As a
                 result, new interdisciplinary research directions have
                 emerged in which social network analysis methods are
                 applied to networks containing hundreds of millions of
                 users. Unfortunately, links between individuals may be
                 missing either due to an imperfect acquirement process
                 or because they are not yet reflected in the online
                 network (i.e., friends in the real world did not form a
                 virtual connection). The primary bottleneck in link
                 prediction techniques is extracting the structural
                 features required for classifying links. In this
                 article, we propose a set of simple, easy-to-compute
                 structural features that can be analyzed to identify
                 missing links. We show that by using simple structural
                 features, a machine learning classifier can
                 successfully identify missing links, even when applied
                 to a predicament of classifying links between
                 individuals with at least one common friend. We also
                 present a method for calculating the amount of data
                 needed in order to build more accurate classifiers. The
                 new Friends measure and Same community features we
                 developed are shown to be good predictors for missing
                 links. An evaluation experiment was performed on ten
                 large social networks datasets: Academia.edu, DBLP,
                 Facebook, Flickr, Flixster, Google+, Gowalla,
                 TheMarker, Twitter, and YouTube. Our methods can
                 provide social network site operators with the
                 capability of helping users to find known, offline
                 contacts and to discover new friends online. They may
                 also be used for exposing hidden links in online social
                 networks.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "10",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Cruz:2013:CDV,
  author =       "Juan David Cruz and C{\'e}cile Bothorel and
                 Fran{\c{c}}ois Poulet",
  title =        "Community detection and visualization in social
                 networks: Integrating structural and semantic
                 information",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "11:1--11:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542193",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Due to the explosion of social networking and the
                 information sharing among their users, the interest in
                 analyzing social networks has increased over the recent
                 years. Two general interests in this kind of studies
                 are community detection and visualization. In the first
                 case, most of the classic algorithms for community
                 detection use only the structural information to
                 identify groups, that is, how clusters are formed
                 according to the topology of the relationships.
                 However, these methods do not take into account any
                 semantic information which could guide the clustering
                 process, and which may add elements to conduct further
                 analyses. In the second case most of the layout
                 algorithms for clustered graphs have been designed to
                 differentiate the groups within the graph, but they are
                 not designed to analyze the interactions between such
                 groups. Identifying these interactions gives an insight
                 into the way different communities exchange messages or
                 information, and allows the social network researcher
                 to identify key actors, roles, and paths from one
                 community to another. This article presents a novel
                 model to use, in a conjoint way, the semantic
                 information from the social network and its structural
                 information to, first, find structurally and
                 semantically related groups of nodes, and second, a
                 layout algorithm for clustered graphs which divides the
                 nodes into two types, one for nodes with edges
                 connecting other communities and another with nodes
                 connecting nodes only within their own community. With
                 this division the visualization tool focuses on the
                 connections between groups facilitating deep studies of
                 augmented social networks.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "11",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Cagliero:2013:PTR,
  author =       "Luca Cagliero and Alessandro Fiori and Luigi
                 Grimaudo",
  title =        "Personalized tag recommendation based on generalized
                 rules",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "12:1--12:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542194",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Tag recommendation is focused on recommending useful
                 tags to a user who is annotating a Web resource. A
                 relevant research issue is the recommendation of
                 additional tags to partially annotated resources, which
                 may be based on either personalized or collective
                 knowledge. However, since the annotation process is
                 usually not driven by any controlled vocabulary, the
                 collections of user-specific and collective annotations
                 are often very sparse. Indeed, the discovery of the
                 most significant associations among tags becomes a
                 challenging task. This article presents a novel
                 personalized tag recommendation system that discovers
                 and exploits generalized association rules, that is,
                 tag correlations holding at different abstraction
                 levels, to identify additional pertinent tags to
                 suggest. The use of generalized rules relevantly
                 improves the effectiveness of traditional rule-based
                 systems in coping with sparse tag collections, because:
                 (i) correlations hidden at the level of individual tags
                 may be anyhow figured out at higher abstraction levels
                 and (ii) low-level tag associations discovered from
                 collective data may be exploited to specialize
                 high-level associations discovered in the user-specific
                 context. The effectiveness of the proposed system has
                 been validated against other personalized approaches on
                 real-life and benchmark collections retrieved from the
                 popular photo-sharing system Flickr.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "12",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Elahi:2013:ALS,
  author =       "Mehdi Elahi and Francesco Ricci and Neil Rubens",
  title =        "Active learning strategies for rating elicitation in
                 collaborative filtering: a system-wide perspective",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "13:1--13:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542195",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The accuracy of collaborative-filtering recommender
                 systems largely depends on three factors: the quality
                 of the rating prediction algorithm, and the quantity
                 and quality of available ratings. While research in the
                 field of recommender systems often concentrates on
                 improving prediction algorithms, even the best
                 algorithms will fail if they are fed poor-quality data
                 during training, that is, garbage in, garbage out.
                 Active learning aims to remedy this problem by focusing
                 on obtaining better-quality data that more aptly
                 reflects a user's preferences. However, traditional
                 evaluation of active learning strategies has two major
                 flaws, which have significant negative ramifications on
                 accurately evaluating the system's performance
                 (prediction error, precision, and quantity of elicited
                 ratings). (1) Performance has been evaluated for each
                 user independently (ignoring system-wide improvements).
                 (2) Active learning strategies have been evaluated in
                 isolation from unsolicited user ratings (natural
                 acquisition). In this article we show that an elicited
                 rating has effects across the system, so a typical
                 user-centric evaluation which ignores any changes of
                 rating prediction of other users also ignores these
                 cumulative effects, which may be more influential on
                 the performance of the system as a whole (system
                 centric). We propose a new evaluation methodology and
                 use it to evaluate some novel and state-of-the-art
                 rating elicitation strategies. We found that the
                 system-wide effectiveness of a rating elicitation
                 strategy depends on the stage of the rating elicitation
                 process, and on the evaluation measures (MAE, NDCG, and
                 Precision). In particular, we show that using some
                 common user-centric strategies may actually degrade the
                 overall performance of a system. Finally, we show that
                 the performance of many common active learning
                 strategies changes significantly when evaluated
                 concurrently with the natural acquisition of ratings in
                 recommender systems.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "13",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{deMeo:2013:AUB,
  author =       "Pasquale de Meo and Emilio Ferrara and Fabian Abel and
                 Lora Aroyo and Geert-Jan Houben",
  title =        "Analyzing user behavior across social sharing
                 environments",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "14:1--14:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2535526",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In this work we present an in-depth analysis of the
                 user behaviors on different Social Sharing systems. We
                 consider three popular platforms, Flickr, Delicious and
                 StumbleUpon, and, by combining techniques from social
                 network analysis with techniques from semantic
                 analysis, we characterize the tagging behavior as well
                 as the tendency to create friendship relationships of
                 the users of these platforms. The aim of our
                 investigation is to see if (and how) the features and
                 goals of a given Social Sharing system reflect on the
                 behavior of its users and, moreover, if there exists a
                 correlation between the social and tagging behavior of
                 the users. We report our findings in terms of the
                 characteristics of user profiles according to three
                 different dimensions: (i) intensity of user activities,
                 (ii) tag-based characteristics of user profiles, and
                 (iii) semantic characteristics of user profiles.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "14",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Shi:2013:ACL,
  author =       "Ziqiang Shi and Jiqing Han and Tieran Zheng",
  title =        "Audio classification with low-rank matrix
                 representation features",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "15:1--15:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542197",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In this article, a novel framework based on trace norm
                 minimization for audio classification is proposed. In
                 this framework, both the feature extraction and
                 classification are obtained by solving corresponding
                 convex optimization problem with trace norm
                 regularization. For feature extraction, robust
                 principle component analysis (robust PCA) via
                 minimization a combination of the nuclear norm and the
                 l$_1$ -norm is used to extract low-rank matrix features
                 which are robust to white noise and gross corruption
                 for audio signal. These low-rank matrix features are
                 fed to a linear classifier where the weight and bias
                 are learned by solving similar trace norm constrained
                 problems. For this linear classifier, most methods find
                 the parameters, that is the weight matrix and bias in
                 batch-mode, which makes it inefficient for large scale
                 problems. In this article, we propose a parallel online
                 framework using accelerated proximal gradient method.
                 This framework has advantages in processing speed and
                 memory cost. In addition, as a result of the
                 regularization formulation of matrix classification,
                 the Lipschitz constant was given explicitly, and hence
                 the step size estimation of the general proximal
                 gradient method was omitted, and this part of computing
                 burden is saved in our approach. Extensive experiments
                 on real data sets for laugh/non-laugh and
                 applause/non-applause classification indicate that this
                 novel framework is effective and noise robust.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "15",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Osman:2013:TMA,
  author =       "Nardine Osman and Carles Sierra and Fiona McNeill and
                 Juan Pane and John Debenham",
  title =        "Trust and matching algorithms for selecting suitable
                 agents",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "16:1--16:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542198",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "This article addresses the problem of finding suitable
                 agents to collaborate with for a given interaction in
                 distributed open systems, such as multiagent and P2P
                 systems. The agent in question is given the chance to
                 describe its confidence in its own capabilities.
                 However, since agents may be malicious, misinformed,
                 suffer from miscommunication, and so on, one also needs
                 to calculate how much trusted is that agent. This
                 article proposes a novel trust model that calculates
                 the expectation about an agent's future performance in
                 a given context by assessing both the agent's
                 willingness and capability through the semantic
                 comparison of the current context in question with the
                 agent's performance in past similar experiences. The
                 proposed mechanism for assessing trust may be applied
                 to any real world application where past commitments
                 are recorded and observations are made that assess
                 these commitments, and the model can then calculate
                 one's trust in another with respect to a future
                 commitment by assessing the other's past performance.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "16",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Montali:2013:MBC,
  author =       "Marco Montali and Fabrizio M. Maggi and Federico
                 Chesani and Paola Mello and Wil M. P. van der Aalst",
  title =        "Monitoring business constraints with the event
                 calculus",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "17:1--17:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542199",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Today, large business processes are composed of
                 smaller, autonomous, interconnected subsystems,
                 achieving modularity and robustness. Quite often, these
                 large processes comprise software components as well as
                 human actors, they face highly dynamic environments and
                 their subsystems are updated and evolve independently
                 of each other. Due to their dynamic nature and
                 complexity, it might be difficult, if not impossible,
                 to ensure at design-time that such systems will always
                 exhibit the desired/expected behaviors. This, in turn,
                 triggers the need for runtime verification and
                 monitoring facilities. These are needed to check
                 whether the actual behavior complies with expected
                 business constraints, internal/external regulations and
                 desired best practices. In this work, we present
                 Mobucon EC, a novel monitoring framework that tracks
                 streams of events and continuously determines the state
                 of business constraints. In Mobucon EC, business
                 constraints are defined using the declarative language
                 Declare. For the purpose of this work, Declare has been
                 suitably extended to support quantitative time
                 constraints and non-atomic, durative activities. The
                 logic-based language Event Calculus (EC) has been
                 adopted to provide a formal specification and semantics
                 to Declare constraints, while a light-weight, logic
                 programming-based EC tool supports dynamically
                 reasoning about partial, evolving execution traces. To
                 demonstrate the applicability of our approach, we
                 describe a case study about maritime safety and
                 security and provide a synthetic benchmark to evaluate
                 its scalability.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "17",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 5(1), article 18 (December 2013).
%%% NOTE(review): closing page of the range "18:1--18:??" is unrecorded;
%%% complete it from the ACM DL page for DOI 10.1145/2542182.2542200.
@Article{Lu:2013:SBA,
  author =       "Qiang Lu and Ruoyun Huang and Yixin Chen and You Xu
                 and Weixiong Zhang and Guoliang Chen",
  title =        "A {SAT-based} approach to cost-sensitive temporally
                 expressive planning",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "18:1--18:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542200",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Complex features, such as temporal dependencies and
                 numerical cost constraints, are hallmarks of real-world
                 planning problems. In this article, we consider the
                 challenging problem of cost-sensitive temporally
                 expressive (CSTE) planning, which requires concurrency
                 of durative actions and optimization of action costs.
                 We first propose a scheme to translate a CSTE planning
                 problem to a minimum cost (MinCost) satisfiability
                 (SAT) problem and to integrate with a relaxed parallel
                 planning semantics for handling true temporal
                 expressiveness. Our scheme finds solution plans that
                 optimize temporal makespan, and also minimize total
                 action costs at the optimal makespan. We propose two
                 approaches for solving MinCost SAT. The first is based
                 on a transformation of a MinCost SAT problem to a
                 weighted partial Max-SAT (WPMax-SAT), and the second,
                 called BB-CDCL, is an integration of the
                 branch-and-bound technique and the conflict driven
                 clause learning (CDCL) method. We also develop a CSTE
                 customized variable branching scheme for BB-CDCL which
                 can significantly improve the search efficiency. Our
                 experiments on the existing CSTE benchmark domains show
                 that our planner compares favorably to the
                 state-of-the-art temporally expressive planners in both
                 efficiency and quality.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "18",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 5(1), article 19 (December 2013).
%%% NOTE(review): final page of "19:1--19:??" still unknown; confirm via
%%% DOI 10.1145/2542182.2542201.
@Article{Shieh:2013:RTS,
  author =       "Jyh-Ren Shieh and Ching-Yung Lin and Shun-Xuan Wang
                 and Ja-Ling Wu",
  title =        "Relational term-suggestion graphs incorporating
                 multipartite concept and expertise networks",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "19:1--19:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542201",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Term suggestions recommend query terms to a user based
                 on his initial query. Suggesting adequate terms is a
                 challenging issue. Most existing commercial search
                 engines suggest search terms based on the frequency of
                 prior used terms that match the leading alphabets the
                 user types. In this article, we present a novel
                 mechanism to construct semantic term-relation graphs to
                 suggest relevant search terms in the semantic level. We
                 built term-relation graphs based on multipartite
                 networks of existing social media, especially from
                 Wikipedia. The multipartite linkage networks of
                 contributor-term, term-category, and term-term are
                 extracted from Wikipedia to eventually form term
                 relation graphs. For fusing these multipartite linkage
                 networks, we propose to incorporate the
                 contributor-category networks to model the expertise of
                 the contributors. Based on our experiments, this step
                 has demonstrated clear enhancement on the accuracy of
                 the inferred relatedness of the term-semantic graphs.
                 Experiments on keyword-expanded search based on 200
                 TREC-5 ad-hoc topics showed obvious advantage of our
                 algorithms over existing approaches.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "19",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 5(1), article 20 (December 2013).  Percent signs in the
%%% abstract are correctly escaped as \% for (La)TeX.
%%% NOTE(review): final page of "20:1--20:??" is unrecorded; confirm via
%%% DOI 10.1145/2542182.2542202.
@Article{Chen:2013:EEM,
  author =       "Tianshi Chen and Yunji Chen and Qi Guo and Zhi-Hua
                 Zhou and Ling Li and Zhiwei Xu",
  title =        "Effective and efficient microprocessor design space
                 exploration using unlabeled design configurations",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "20:1--20:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542202",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Ever-increasing design complexity and advances of
                 technology impose great challenges on the design of
                 modern microprocessors. One such challenge is to
                 determine promising microprocessor configurations to
                 meet specific design constraints, which is called
                 Design Space Exploration (DSE). In the computer
                 architecture community, supervised learning techniques
                 have been applied to DSE to build regression models for
                 predicting the qualities of design configurations. For
                 supervised learning, however, considerable simulation
                 costs are required for attaining the labeled design
                 configurations. Given limited resources, it is
                 difficult to achieve high accuracy. In this article,
                 inspired by recent advances in semisupervised learning
                 and active learning, we propose the COAL approach which
                 can exploit unlabeled design configurations to
                 significantly improve the models. Empirical study
                 demonstrates that COAL significantly outperforms a
                 state-of-the-art DSE technique by reducing mean squared
                 error by 35\% to 95\%, and thus, promising
                 architectures can be attained more efficiently.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "20",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 5(1), article 21 (December 2013).  The dollar sign in the
%%% abstract is correctly escaped as \$ for (La)TeX.
%%% NOTE(review): final page of "21:1--21:??" is unrecorded; confirm via
%%% DOI 10.1145/2542182.2542203.
@Article{Singh:2013:NBG,
  author =       "Munindar P. Singh",
  title =        "Norms as a basis for governing sociotechnical
                 systems",
  journal =      j-TIST,
  volume =       "5",
  number =       "1",
  pages =        "21:1--21:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542182.2542203",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Mar 13 07:29:16 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "We understand a sociotechnical system as a
                 multistakeholder cyber-physical system. We introduce
                 governance as the administration of such a system by
                 the stakeholders themselves. In this regard, governance
                 is a peer-to-peer notion and contrasts with traditional
                 management, which is a top-down hierarchical notion.
                 Traditionally, there is no computational support for
                 governance and it is achieved through out-of-band
                 interactions among system administrators. Not
                 surprisingly, traditional approaches simply do not
                 scale up to large sociotechnical systems. We develop an
                 approach for governance based on a computational
                 representation of norms in organizations. Our approach
                 is motivated by the Ocean Observatory Initiative, a
                 thirty-year \$400 million project, which supports a
                 variety of resources dealing with monitoring and
                 studying the world's oceans. These resources include
                 autonomous underwater vehicles, ocean gliders, buoys,
                 and other instrumentation as well as more traditional
                 computational resources. Our approach has the benefit
                 of directly reflecting stakeholder needs and assuring
                 stakeholders of the correctness of the resulting
                 governance decisions while yielding adaptive resource
                 allocation in the face of changes in both stakeholder
                 needs and physical circumstances.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "21",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 5(2), article 22 (April 2014).  Editorial front matter (guest
%%% editors' introduction to a special issue), hence no abstract field.
%%% NOTE(review): final page of "22:1--22:??" is unrecorded; confirm via
%%% DOI 10.1145/2594452.
@Article{He:2014:ISI,
  author =       "Qi He and Juanzi Li and Rong Yan and John Yen and
                 Haizheng Zhang",
  title =        "Introduction to the {Special Issue on Linking Social
                 Granularity and Functions}",
  journal =      j-TIST,
  volume =       "5",
  number =       "2",
  pages =        "22:1--22:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2594452",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Apr 24 16:09:50 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "22",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 5(2), article 23 (April 2014).
%%% NOTE(review): final page of "23:1--23:??" is unrecorded; confirm via
%%% DOI 10.1145/2499380.
@Article{Wang:2014:IUI,
  author =       "Jinpeng Wang and Wayne Xin Zhao and Yulan He and
                 Xiaoming Li",
  title =        "Infer User Interests via Link Structure
                 Regularization",
  journal =      j-TIST,
  volume =       "5",
  number =       "2",
  pages =        "23:1--23:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2499380",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Apr 24 16:09:50 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Learning user interests from online social networks
                 helps to better understand user behaviors and provides
                 useful guidance to design user-centric applications.
                 Apart from analyzing users' online content, it is also
                 important to consider users' social connections in the
                 social Web. Graph regularization methods have been
                 widely used in various text mining tasks, which can
                 leverage the graph structure information extracted from
                 data. Previously, graph regularization methods operate
                 under the cluster assumption that nearby nodes are more
                 similar and nodes on the same structure (typically
                 referred to as a cluster or a manifold) are likely to
                 be similar. We argue that learning user interests from
                 complex, sparse, and dynamic social networks should be
                 based on the link structure assumption under which node
                 similarities are evaluated based on the local link
                 structures instead of explicit links between two nodes.
                 We propose a regularization framework based on the
                 relation bipartite graph, which can be constructed from
                 any type of relations. Using Twitter as our case study,
                 we evaluate our proposed framework from social networks
                 built from retweet relations. Both quantitative and
                 qualitative experiments show that our proposed method
                 outperforms a few competitive baselines in learning
                 user interests over a set of predefined topics. It also
                 gives superior results compared to the baselines on
                 retweet prediction and topical authority
                 identification.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "23",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 5(2), article 24 (April 2014).
%%% NOTE(review): final page of "24:1--24:??" is unrecorded; confirm via
%%% DOI 10.1145/2501977.
@Article{Javari:2014:CBC,
  author =       "Amin Javari and Mahdi Jalili",
  title =        "Cluster-Based Collaborative Filtering for Sign
                 Prediction in Social Networks with Positive and
                 Negative Links",
  journal =      j-TIST,
  volume =       "5",
  number =       "2",
  pages =        "24:1--24:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2501977",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Apr 24 16:09:50 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Social network analysis and mining get
                 ever-increasingly important in recent years, which is
                 mainly due to the availability of large datasets and
                 advances in computing systems. A class of social
                 networks is those with positive and negative links. In
                 such networks, a positive link indicates friendship (or
                 trust), whereas links with a negative sign correspond
                 to enmity (or distrust). Predicting the sign of the
                 links in these networks is an important issue and has
                 many applications, such as friendship recommendation
                 and identifying malicious nodes in the network. In this
                 manuscript, we proposed a new method for sign
                 prediction in networks with positive and negative
                 links. Our algorithm is based first on clustering the
                 network into a number of clusters and then applying a
                 collaborative filtering algorithm. The clusters are
                 such that the number of intra-cluster negative links
                 and inter-cluster positive links are minimal, that is,
                 the clusters are socially balanced as much as possible
                 (a signed graph is socially balanced if it can be
                 divided into clusters with all positive links inside
                 the clusters and all negative links between them). We
                 then used similarity between the clusters (based on the
                 links between them) in a collaborative filtering
                 algorithm. Our experiments on a number of real datasets
                 showed that the proposed method outperformed previous
                 methods, including those based on social balance and
                 status theories and one based on a machine learning
                 framework (logistic regression in this work).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "24",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 5(2), article 25 (April 2014).  The acronym {CIM} in the title
%%% is brace-protected against style recasing.
%%% NOTE(review): final page of "25:1--25:??" is unrecorded; confirm via
%%% DOI 10.1145/2532549.
@Article{Chen:2014:CCB,
  author =       "Yi-Cheng Chen and Wen-Yuan Zhu and Wen-Chih Peng and
                 Wang-Chien Lee and Suh-Yin Lee",
  title =        "{CIM}: Community-Based Influence Maximization in
                 Social Networks",
  journal =      j-TIST,
  volume =       "5",
  number =       "2",
  pages =        "25:1--25:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2532549",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Apr 24 16:09:50 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Given a social graph, the problem of influence
                 maximization is to determine a set of nodes that
                 maximizes the spread of influences. While some recent
                 research has studied the problem of influence
                 maximization, these works are generally too time
                 consuming for practical use in a large-scale social
                 network. In this article, we develop a new framework,
                 community-based influence maximization (CIM), to tackle
                 the influence maximization problem with an emphasis on
                 the time efficiency issue. Our proposed framework, CIM,
                 comprises three phases: (i) community detection, (ii)
                 candidate generation, and (iii) seed selection.
                 Specifically, phase (i) discovers the community
                 structure of the network; phase (ii) uses the
                 information of communities to narrow down the possible
                 seed candidates; and phase (iii) finalizes the seed
                 nodes from the candidate set. By exploiting the
                 properties of the community structures, we are able to
                 avoid overlapped information and thus efficiently
                 select the number of seeds to maximize information
                 spreads. The experimental results on both synthetic and
                 real datasets show that the proposed CIM algorithm
                 significantly outperforms the state-of-the-art
                 algorithms in terms of efficiency and scalability, with
                 almost no compromise of effectiveness.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "25",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 5(2), article 26 (April 2014).
%%% NOTE(review): repaired two apparent transcription typos in the
%%% abstract ("the structure [of] communities", "this [is] in
%%% contrast") -- both were ungrammatical as stored; please confirm
%%% against the published abstract at DOI 10.1145/2594454.  Final page
%%% of "26:1--26:??" remains unrecorded.
@Article{Yang:2014:SOG,
  author =       "Jaewon Yang and Jure Leskovec",
  title =        "Structure and Overlaps of Ground-Truth Communities in
                 Networks",
  journal =      j-TIST,
  volume =       "5",
  number =       "2",
  pages =        "26:1--26:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2594454",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Apr 24 16:09:50 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "One of the main organizing principles in real-world
                 networks is that of network communities, where sets of
                 nodes organize into densely linked clusters. Even
                 though detection of such communities is of great
                 interest, understanding the structure of communities
                 in large networks remains relatively limited. In
                 particular, due to the unavailability of labeled
                 ground-truth data, it was traditionally very hard to
                 develop accurate models of network community structure.
                 Here we use six large social, collaboration, and
                 information networks where nodes explicitly state their
                 ground-truth community memberships. For example, nodes
                 in social networks join into explicitly defined
                 interest based groups, and we use such groups as
                 explicitly labeled ground-truth communities. We use
                 such ground-truth communities to study their structural
                 signatures by analyzing how ground-truth communities
                 emerge in networks and how they overlap. We observe
                 some surprising phenomena. First, ground-truth
                 communities contain high-degree hub nodes that reside
                 in community overlaps and link to most of the members
                 of the community. Second, the overlaps of communities
                 are more densely connected than the non-overlapping
                 parts of communities. We show that this is in contrast
                 to the conventional wisdom that community overlaps are
                 more sparsely connected than the non-overlapping parts
                 themselves. We then show that many existing models of
                 network communities do not capture dense community
                 overlaps. This in turn means that most present models
                 and community detection methods confuse overlaps as
                 separate communities. In contrast, we present the
                 community-affiliation graph model (AGM), a conceptual
                 model of network community structure. We demonstrate
                 that AGM reliably captures the overall structure of
                 networks as well as the overlapping and hierarchical
                 nature of network communities.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "26",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 5(2), article 27 (April 2014).
%%% NOTE(review): the dataset URL inside the abstract contains a raw
%%% "~", which typesets as a tie if the abstract is ever run through
%%% (La)TeX -- presumably left literal so the URL stays copyable from
%%% the .bib source; verify before changing.  Final page of
%%% "27:1--27:??" is unrecorded; confirm via DOI 10.1145/2594455.
@Article{Gong:2014:JLP,
  author =       "Neil Zhenqiang Gong and Ameet Talwalkar and Lester
                 Mackey and Ling Huang and Eui Chul Richard Shin and
                 Emil Stefanov and Elaine (Runting) Shi and Dawn Song",
  title =        "Joint Link Prediction and Attribute Inference Using a
                 Social-Attribute Network",
  journal =      j-TIST,
  volume =       "5",
  number =       "2",
  pages =        "27:1--27:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2594455",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Apr 24 16:09:50 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The effects of social influence and homophily suggest
                 that both network structure and node-attribute
                 information should inform the tasks of link prediction
                 and node-attribute inference. Recently, Yin et al.
                 [2010a, 2010b] proposed an attribute-augmented social
                 network model, which we call Social-Attribute Network
                 (SAN), to integrate network structure and node
                 attributes to perform both link prediction and
                 attribute inference. They focused on generalizing the
                 random walk with a restart algorithm to the SAN
                 framework and showed improved performance. In this
                 article, we extend the SAN framework with several
                 leading supervised and unsupervised link-prediction
                 algorithms and demonstrate performance improvement for
                 each algorithm on both link prediction and attribute
                 inference. Moreover, we make the novel observation that
                 attribute inference can help inform link prediction,
                 that is, link-prediction accuracy is further improved
                 by first inferring missing attributes. We
                 comprehensively evaluate these algorithms and compare
                 them with other existing algorithms using a novel,
                 large-scale Google+ dataset, which we make publicly
                 available
                 (http://www.cs.berkeley.edu/~stevgong/gplus.html).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "27",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 5(2), article 28 (April 2014).  The comma in "applications, is
%%% to develop" is awkward but plausibly verbatim from the published
%%% abstract, so it is preserved as data.
%%% NOTE(review): final page of "28:1--28:??" is unrecorded; confirm via
%%% DOI 10.1145/2517088.
@Article{Pool:2014:DDC,
  author =       "Simon Pool and Francesco Bonchi and Matthijs van
                 Leeuwen",
  title =        "Description-Driven Community Detection",
  journal =      j-TIST,
  volume =       "5",
  number =       "2",
  pages =        "28:1--28:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2517088",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Apr 24 16:09:50 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Traditional approaches to community detection, as
                 studied by physicists, sociologists, and more recently
                 computer scientists, aim at simply partitioning the
                 social network graph. However, with the advent of
                 online social networking sites, richer data has become
                 available: beyond the link information, each user in
                 the network is annotated with additional information,
                 for example, demographics, shopping behavior, or
                 interests. In this context, it is therefore important
                 to develop mining methods which can take advantage of
                 all available information. In the case of community
                 detection, this means finding good communities (a set
                 of nodes cohesive in the social graph) which are
                 associated with good descriptions in terms of user
                 information (node attributes). Having good descriptions
                 associated to our models make them understandable by
                 domain experts and thus more useful in real-world
                 applications. Another requirement dictated by
                 real-world applications, is to develop methods that can
                 use, when available, any domain-specific background
                 knowledge. In the case of community detection the
                 background knowledge could be a vague description of
                 the communities sought in a specific application, or
                 some prototypical nodes (e.g., good customers in the
                 past), that represent what the analyst is looking for
                 (a community of similar users). Towards this goal, in
                 this article, we define and study the problem of
                 finding a diverse set of cohesive communities with
                 concise descriptions. We propose an effective algorithm
                 that alternates between two phases: a hill-climbing
                 phase producing (possibly overlapping) communities, and
                 a description induction phase which uses techniques
                 from supervised pattern set mining. Our framework has
                 the nice feature of being able to build well-described
                 cohesive communities starting from any given
                 description or seed set of nodes, which makes it very
                 flexible and easily applicable in real-world
                 applications. Our experimental evaluation confirms that
                 the proposed method discovers cohesive communities with
                 concise descriptions in realistic and large online
                 social networks such as Delicious, Flickr, and
                 LastFM.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "28",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% TIST 5(2), article 29 (April 2014).
%%% NOTE(review): final page of "29:1--29:??" is unrecorded; confirm via
%%% DOI 10.1145/2589481.
@Article{Li:2014:LPH,
  author =       "Nan Li and William Cushing and Subbarao Kambhampati
                 and Sungwook Yoon",
  title =        "Learning Probabilistic Hierarchical Task Networks as
                 Probabilistic Context-Free Grammars to Capture User
                 Preferences",
  journal =      j-TIST,
  volume =       "5",
  number =       "2",
  pages =        "29:1--29:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2589481",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Apr 24 16:09:50 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "We introduce an algorithm to automatically learn
                 probabilistic hierarchical task networks (pHTNs) that
                 capture a user's preferences on plans by observing only
                 the user's behavior. HTNs are a common choice of
                 representation for a variety of purposes in planning,
                 including work on learning in planning. Our
                 contributions are twofold. First, in contrast with
                 prior work, which employs HTNs to represent domain
                 physics or search control knowledge, we use HTNs to
                 model user preferences. Second, while most prior work
                 on HTN learning requires additional information (e.g.,
                 annotated traces or tasks) to assist the learning
                 process, our system only takes plan traces as input.
                 Initially, we will assume that users carry out
                 preferred plans more frequently, and thus the observed
                 distribution of plans is an accurate representation of
                 user preference. We then generalize to the situation
                 where feasibility constraints frequently prevent the
                 execution of preferred plans. Taking the prevalent
                 perspective of viewing HTNs as grammars over primitive
                 actions, we adapt an expectation-maximization (EM)
                 technique from the discipline of probabilistic grammar
                 induction to acquire probabilistic context-free
                 grammars (pCFG) that capture the distribution on plans.
                 To account for the difference between the distributions
                 of possible and preferred plans, we subsequently modify
                 this core EM technique by rescaling its input. We
                 empirically demonstrate that the proposed approaches
                 are able to learn HTNs representing user preferences
                 better than the inside-outside algorithm. Furthermore,
                 when feasibility constraints are obfuscated, the
                 algorithm with rescaled input performs better than the
                 algorithm with the original input.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "29",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Reches:2014:FEC,
  author =       "Shulamit Reches and Meir Kalech and Philip Hendrix",
  title =        "A Framework for Effectively Choosing between
                 Alternative Candidate Partners",
  journal =      j-TIST,
  volume =       "5",
  number =       "2",
  pages =        "30:1--30:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2589482",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Apr 24 16:09:50 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Many multi-agent settings require that agents identify
                 appropriate partners or teammates with whom to work on
                 tasks. When selecting potential partners, agents may
                 benefit from obtaining information about the
                 alternatives, for instance, through gossip (i.e., by
                 consulting others) or reputation systems. When
                 information is uncertain and associated with cost,
                 deciding on the amount of information needed is a hard
                 optimization problem. This article defines a
                 statistical model, the Information-Acquisition Source
                 Utility model (IASU), by which agents, operating in an
                 uncertain world, can determine (1) which information
                 sources they should request for information, and (2)
                 the amount of information to collect about potential
                 partners from each source. To maximize the expected
                 gain from the choice, IASU computes the utility of
                 choosing a partner by estimating the benefit of
                 additional information. The article presents empirical
                 studies through a simulation domain as well as a
                 real-world domain of restaurants. We compare the IASU
                 model to other relevant models and show that the use of
                 the IASU model significantly increases agents' overall
                 utility.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "30",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Heath:2014:CST,
  author =       "Derrall Heath and David Norton and Dan Ventura",
  title =        "Conveying Semantics through Visual Metaphor",
  journal =      j-TIST,
  volume =       "5",
  number =       "2",
  pages =        "31:1--31:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2589483",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Apr 24 16:09:50 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In the field of visual art, metaphor is a way to
                 communicate meaning to the viewer. We present a
                 computational system for communicating visual metaphor
                 that can identify adjectives for describing an image
                 based on a low-level visual feature representation of
                 the image. We show that the system can use this
                 visual-linguistic association to render source images
                 that convey the meaning of adjectives in a way
                 consistent with human understanding. Our conclusions
                 are based on a detailed analysis of how the system's
                 artifacts cluster, how these clusters correspond to the
                 semantic relationships of adjectives as documented in
                 WordNet, and how these clusters correspond to human
                 opinion.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "31",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Lian:2014:MCH,
  author =       "Defu Lian and Xing Xie",
  title =        "Mining Check-In History for Personalized Location
                 Naming",
  journal =      j-TIST,
  volume =       "5",
  number =       "2",
  pages =        "32:1--32:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2490890",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Apr 24 16:09:50 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Many innovative location-based services have been
                 established to offer users greater convenience in their
                 everyday lives. These services usually cannot map
                 user's physical locations into semantic names
                 automatically. The semantic names of locations provide
                 important context for mobile recommendations and
                 advertisements. In this article, we proposed a novel
                 location naming approach which can automatically
                 provide semantic names for users given their locations
                 and time. In particular, when a user opens a GPS device
                 and submits a query with her physical location and
                 time, she will be returned the most appropriate
                 semantic name. In our approach, we drew an analogy
                 between location naming and local search, and designed
                 a local search framework to propose a spatiotemporal
                 and user preference (STUP) model for location naming.
                 STUP combined three components, user preference (UP),
                 spatial preference (SP), and temporal preference (TP),
                 by leveraging learning-to-rank techniques. We evaluated
                 STUP on 466,190 check-ins of 5,805 users from Shanghai
                 and 135,052 check-ins of 1,361 users from Beijing. The
                 results showed that SP was most effective among three
                 components and that UP can provide personalized
                 semantic names, and thus it was a necessity for
                 location naming. Although TP was not as discriminative
                 as the others, it can still be beneficial when
                 integrated with SP and UP. Finally, according to the
                 experimental results, STUP outperformed the proposed
                 baselines and returned accurate semantic names for
                 23.6\% and 26.6\% of the testing queries from Beijing
                 and Shanghai, respectively.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "32",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Bian:2014:EUP,
  author =       "Jiang Bian and Bo Long and Lihong Li and Taesup Moon
                 and Anlei Dong and Yi Chang",
  title =        "Exploiting User Preference for Online Learning in
                 {Web} Content Optimization Systems",
  journal =      j-TIST,
  volume =       "5",
  number =       "2",
  pages =        "33:1--33:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2493259",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Apr 24 16:09:50 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Web portal services have become an important medium to
                 deliver digital content (e.g. news, advertisements,
                 etc.) to Web users in a timely fashion. To attract more
                 users to various content modules on the Web portal, it
                 is necessary to design a recommender system that can
                 effectively achieve Web portal content optimization by
                 automatically estimating content item attractiveness
                 and relevance to user interests. The state-of-the-art
                 online learning methodology adapts dedicated pointwise
                 models to independently estimate the attractiveness
                 score for each candidate content item. Although such
                 pointwise models can be easily adapted for online
                 recommendation, there still remain a few critical
                 problems. First, this pointwise methodology fails to
                 use invaluable user preferences between content items.
                 Moreover, the performance of pointwise models decreases
                 drastically when facing the problem of sparse learning
                 samples. To address these problems, we propose
                 exploring a new dynamic pairwise learning methodology
                 for Web portal content optimization in which we exploit
                 dynamic user preferences extracted based on users'
                 actions on portal services to compute the
                 attractiveness scores of content items. In this
                 article, we introduce two specific pairwise learning
                 algorithms, a straightforward graph-based algorithm and
                 a formalized Bayesian modeling one. Experiments on
                 large-scale data from a commercial Web portal
                 demonstrate the significant improvement of pairwise
                 methodologies over the baseline pointwise models.
                 Further analysis illustrates that our new pairwise
                 learning approaches can benefit personalized
                 recommendation more than pointwise models, since the
                 data sparsity is more critical for personalized content
                 optimization.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "33",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Hossain:2014:AFS,
  author =       "M. Shahriar Hossain and Manish Marwah and Amip Shah
                 and Layne T. Watson and Naren Ramakrishnan",
  title =        "{AutoLCA}: a Framework for Sustainable Redesign and
                 Assessment of Products",
  journal =      j-TIST,
  volume =       "5",
  number =       "2",
  pages =        "34:1--34:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2505270",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Apr 24 16:09:50 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "With increasing public consciousness regarding
                 sustainability, companies are ever more eager to
                 introduce eco-friendly products and services. Assessing
                 environmental footprints and designing sustainable
                 products are challenging tasks since they require
                 analysis of each component of a product through their
                 life cycle. To achieve sustainable design of products,
                 companies need to evaluate the environmental impact of
                 their system, identify the major contributors to the
                 footprint, and select the design alternative with the
                 lowest environmental footprint. In this article, we
                 formulate sustainable design as a series of clustering
                 and classification problems, and propose a framework
                 called AutoLCA that simplifies the effort of estimating
                 the environmental footprint of a product bill of
                 materials by more than an order of magnitude over
                 current methods, which are mostly labor intensive. We
                 apply AutoLCA to real data from a large computer
                 manufacturer. We conduct a case study on bill of
                 materials of four different products, perform a
                 ``hotspot'' assessment analysis to identify major
                 contributors to carbon footprint, and determine design
                 alternatives that can reduce the carbon footprint from
                 1\% to 36\%.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "34",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Shi:2014:MLC,
  author =       "Chuan Shi and Xiangnan Kong and Di Fu and Philip S. Yu
                 and Bin Wu",
  title =        "Multi-Label Classification Based on Multi-Objective
                 Optimization",
  journal =      j-TIST,
  volume =       "5",
  number =       "2",
  pages =        "35:1--35:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2505272",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Apr 24 16:09:50 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Multi-label classification refers to the task of
                 predicting potentially multiple labels for a given
                 instance. Conventional multi-label classification
                 approaches focus on single objective setting, where the
                 learning algorithm optimizes over a single performance
                  criterion (e.g., Ranking Loss) or a heuristic
                 function. The basic assumption is that the optimization
                 over one single objective can improve the overall
                 performance of multi-label classification and meet the
                 requirements of various applications. However, in many
                 real applications, an optimal multi-label classifier
                 may need to consider the trade-offs among multiple
                 inconsistent objectives, such as minimizing Hamming
                 Loss while maximizing Micro F1. In this article, we
                 study the problem of multi-objective multi-label
                 classification and propose a novel solution (called
                 Moml) to optimize over multiple objectives
                 simultaneously. Note that optimization objectives may
                 be inconsistent, even conflicting, thus one cannot
                 identify a single solution that is optimal on all
                 objectives. Our Moml algorithm finds a set of
                 non-dominated solutions which are optimal according to
                 different trade-offs among multiple objectives. So
                 users can flexibly construct various predictive models
                 from the solution set, which provides more meaningful
                 classification results in different application
                 scenarios. Empirical studies on real-world tasks
                 demonstrate that the Moml can effectively boost the
                 overall performance of multi-label classification by
                 optimizing over multiple objectives simultaneously.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "35",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Tang:2014:DSM,
  author =       "Xuning Tang and Christopher C. Yang",
  title =        "Detecting Social Media Hidden Communities Using
                 Dynamic Stochastic Blockmodel with Temporal {Dirichlet}
                 Process",
  journal =      j-TIST,
  volume =       "5",
  number =       "2",
  pages =        "36:1--36:??",
  month =        apr,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2517085",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Apr 24 16:09:50 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Detecting evolving hidden communities within dynamic
                 social networks has attracted significant attention
                 recently due to its broad applications in e-commerce,
                 online social media, security intelligence, public
                 health, and other areas. Many community network
                 detection techniques employ a two-stage approach to
                 identify and detect evolutionary relationships between
                 communities of two adjacent time epochs. These
                 techniques often identify communities with high
                 temporal variation, since the two-stage approach
                 detects communities of each epoch independently without
                 considering the continuity of communities across two
                 time epochs. Other techniques require identification of
                 a predefined number of hidden communities which is not
                 realistic in many applications. To overcome these
                 limitations, we propose the Dynamic Stochastic
                 Blockmodel with Temporal Dirichlet Process, which
                 enables the detection of hidden communities and tracks
                 their evolution simultaneously from a network stream.
                 The number of hidden communities is automatically
                 determined by a temporal Dirichlet process without
                 human intervention. We tested our proposed technique on
                 three different testbeds with results identifying a
                 high performance level when compared to the baseline
                 algorithm.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "36",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zheng:2014:ISS,
  author =       "Yu Zheng and Licia Capra and Ouri Wolfson and Hai
                 Yang",
  title =        "Introduction to the Special Section on Urban
                 Computing",
  journal =      j-TIST,
  volume =       "5",
  number =       "3",
  pages =        "37:1--37:??",
  month =        sep,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2642650",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:08 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "37",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zheng:2014:UCC,
  author =       "Yu Zheng and Licia Capra and Ouri Wolfson and Hai
                 Yang",
  title =        "Urban Computing: Concepts, Methodologies, and
                 Applications",
  journal =      j-TIST,
  volume =       "5",
  number =       "3",
  pages =        "38:1--38:??",
  month =        sep,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2629592",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:08 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Urbanization's rapid progress has modernized many
                 people's lives but also engendered big issues, such as
                 traffic congestion, energy consumption, and pollution.
                 Urban computing aims to tackle these issues by using
                 the data that has been generated in cities (e.g.,
                 traffic flow, human mobility, and geographical data).
                 Urban computing connects urban sensing, data
                 management, data analytics, and service providing into
                 a recurrent process for an unobtrusive and continuous
                 improvement of people's lives, city operation systems,
                 and the environment. Urban computing is an
                 interdisciplinary field where computer sciences meet
                 conventional city-related fields, like transportation,
                 civil engineering, environment, economy, ecology, and
                 sociology in the context of urban spaces. This article
                 first introduces the concept of urban computing,
                 discussing its general framework and key challenges
                 from the perspective of computer sciences. Second, we
                 classify the applications of urban computing into seven
                 categories, consisting of urban planning,
                 transportation, the environment, energy, social,
                 economy, and public safety and security, presenting
                 representative scenarios in each category. Third, we
                 summarize the typical technologies that are needed in
                 urban computing into four folds, which are about urban
                 sensing, urban data management, knowledge fusion across
                 heterogeneous data, and urban data visualization.
                 Finally, we give an outlook on the future of urban
                 computing, suggesting a few research topics that are
                 somehow missing in the community.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "38",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Etienne:2014:MBC,
  author =       "Etienne C{\^o}me and Latifa Oukhellou",
  title =        "Model-Based Count Series Clustering for Bike Sharing
                 System Usage Mining: a Case Study with the {V{\'e}lib'}
                 System of {Paris}",
  journal =      j-TIST,
  volume =       "5",
  number =       "3",
  pages =        "39:1--39:??",
  month =        sep,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2560188",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:08 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Today, more and more bicycle sharing systems (BSSs)
                 are being introduced in big cities. These
                 transportation systems generate sizable transportation
                 data, the mining of which can reveal the underlying
                 urban phenomenon linked to city dynamics. This article
                 presents a statistical model to automatically analyze
                 the trip data of a bike sharing system. The proposed
                 solution partitions (i.e., clusters) the stations
                 according to their usage profiles. To do so, count
                  series describing the stations' usage through
                 departure/arrival counts per hour throughout the day
                 are built and analyzed. The model for processing these
                 count series is based on Poisson mixtures and
                 introduces a station scaling factor that handles the
                  differences between the stations' global usage.
                 Differences between weekday and weekend usage are also
                 taken into account. This model identifies the latent
                 factors that shape the geography of trips, and the
                 results may thus offer insights into the relationships
                 between station neighborhood type (its amenities, its
                 demographics, etc.) and the generated mobility
                 patterns. In other words, the proposed method brings to
                 light the different functions in different areas that
                 induce specific patterns in BSS data. These potentials
                 are demonstrated through an in-depth analysis of the
                 results obtained on the Paris V{\'e}lib' large-scale
                 bike sharing system.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "39",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Ying:2014:MUC,
  author =       "Josh Jia-Ching Ying and Wen-Ning Kuo and Vincent S.
                 Tseng and Eric Hsueh-Chan Lu",
  title =        "Mining User Check-In Behavior with a Random Walk for
                 Urban Point-of-Interest Recommendations",
  journal =      j-TIST,
  volume =       "5",
  number =       "3",
  pages =        "40:1--40:??",
  month =        sep,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2523068",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:08 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In recent years, research into the mining of user
                 check-in behavior for point-of-interest (POI)
                 recommendations has attracted a lot of attention.
                 Existing studies on this topic mainly treat such
                  recommendations in a traditional manner---that is, they
                 treat POIs as items and check-ins as ratings. However,
                 users usually visit a place for reasons other than to
                 simply say that they have visited. In this article, we
                 propose an approach referred to as Urban POI-Walk
                 (UPOI-Walk), which takes into account a user's
                 social-triggered intentions (SI), preference-triggered
                 intentions (PreI), and popularity-triggered intentions
                 (PopI), to estimate the probability of a user
                 checking-in to a POI. The core idea of UPOI-Walk
                 involves building a HITS-based random walk on the
                 normalized check-in network, thus supporting the
                 prediction of POI properties related to each user's
                 preferences. To achieve this goal, we define several
                 user--POI graphs to capture the key properties of the
                 check-in behavior motivated by user intentions. In our
                 UPOI-Walk approach, we propose a new kind of random
                  walk model---Dynamic HITS-based Random Walk---which
                 comprehensively considers the relevance between POIs
                 and users from different aspects. On the basis of
                 similitude, we make an online recommendation as to the
                 POI the user intends to visit. To the best of our
                 knowledge, this is the first work on urban POI
                 recommendations that considers user check-in behavior
                 motivated by SI, PreI, and PopI in location-based
                 social network data. Through comprehensive experimental
                 evaluations on two real datasets, the proposed
                 UPOI-Walk is shown to deliver excellent performance.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "40",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Mcardle:2014:UDF,
  author =       "Gavin Mcardle and Eoghan Furey and Aonghus Lawlor and
                 Alexei Pozdnoukhov",
  title =        "Using Digital Footprints for a City-Scale Traffic
                 Simulation",
  journal =      j-TIST,
  volume =       "5",
  number =       "3",
  pages =        "41:1--41:??",
  month =        sep,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2517028",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:08 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "This article introduces a microsimulation of urban
                 traffic flows within a large-scale scenario implemented
                 for the Greater Dublin region in Ireland.
                 Traditionally, the data available for traffic
                 simulations come from a population census and dedicated
                 road surveys that only partly cover shopping, leisure,
                 or recreational trips. To account for the latter, the
                 presented traffic modeling framework exploits the
                 digital footprints of city inhabitants on services such
                 as Twitter and Foursquare. We enriched the model with
                 findings from our previous studies on geographical
                 layout of communities in a country-wide mobile phone
                 network to account for socially related journeys. These
                 datasets were used to calibrate a variant of a
                 radiation model of spatial choice, which we introduced
                 in order to drive individuals' decisions on trip
                 destinations within an assigned daily activity plan. We
                 observed that given the distribution of population, the
                 workplace locations, a comprehensive set of urban
                 facilities, and a list of typical activity sequences of
                 city dwellers collected within a national travel
                 survey, the developed microsimulation reproduces not
                 only the journey statistics such as peak travel periods
                 but also the traffic volumes at main road segments with
                 surprising accuracy.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "41",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% ACM TIST 5(3), article 42, September 2014; DOI 10.1145/2513567.
@Article{Momtazpour:2014:CSI,
  author =       "Marjan Momtazpour and Patrick Butler and Naren
                 Ramakrishnan and M. Shahriar Hossain and Mohammad C.
                 Bozchalui and Ratnesh Sharma",
  title =        "Charging and Storage Infrastructure Design for
                 Electric Vehicles",
  journal =      j-TIST,
  volume =       "5",
  number =       "3",
  pages =        "42:1--42:??",
  month =        sep,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2513567",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:08 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Ushered by recent developments in various areas of
                 science and technology, modern energy systems are going
                 to be an inevitable part of our societies. Smart grids
                 are one of these modern systems that have attracted
                 many research activities in recent years. Before
                 utilizing the next generation of smart grids, we should
                 have a comprehensive understanding of the
                 interdependent energy networks and processes.
                 Next-generation energy systems networks cannot be
                 effectively designed, analyzed, and controlled in
                 isolation from the social, economic, sensing, and
                 control contexts in which they operate. In this
                 article, we present a novel framework to support
                 charging and storage infrastructure design for electric
                 vehicles. We develop coordinated clustering techniques
                 to work with network models of urban environments to
                 aid in placement of charging stations for an electrical
                 vehicle deployment scenario. Furthermore, we evaluate
                 the network before and after the deployment of charging
                 stations, to recommend the installation of appropriate
                 storage units to overcome the extra load imposed on the
                 network by the charging stations. We demonstrate the
                 multiple factors that can be simultaneously leveraged
                 in our framework to achieve practical urban deployment.
                 Our ultimate goal is to help realize sustainable energy
                 system management in urban electrical infrastructure by
                 modeling and analyzing networks of interactions between
                 electric systems and urban populations.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "42",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% ACM TIST 5(3), article 43, September 2014; DOI 10.1145/2542665.
%%% NOTE(review): ``Objected-Oriented'' in the abstract reproduces the
%%% published abstract's own wording; do not "correct" quoted material.
@Article{Tan:2014:OOT,
  author =       "Chang Tan and Qi Liu and Enhong Chen and Hui Xiong and
                 Xiang Wu",
  title =        "Object-Oriented Travel Package Recommendation",
  journal =      j-TIST,
  volume =       "5",
  number =       "3",
  pages =        "43:1--43:??",
  month =        sep,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542665",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:08 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Providing better travel services for tourists is one
                 of the important applications in urban computing.
                 Though many recommender systems have been developed for
                 enhancing the quality of travel service, most of them
                 lack a systematic and open framework to dynamically
                 incorporate multiple types of additional context
                 information existing in the tourism domain, such as the
                 travel area, season, and price of travel packages. To
                 that end, in this article, we propose an open
                 framework, the Objected-Oriented Recommender System
                 (ORS), for the developers performing personalized
                 travel package recommendations to tourists. This
                 framework has the ability to import all the available
                 additional context information to the travel package
                 recommendation process in a cost-effective way.
                 Specifically, the different types of additional
                 information are extracted and uniformly represented as
                 feature--value pairs. Then, we define the Object, which
                 is the collection of the feature--value pairs. We
                 propose two models that can be used in the ORS
                 framework for extracting the implicit relationships
                 among Objects. The Objected-Oriented Topic Model (OTM)
                 can extract the topics conditioned on the intrinsic
                 feature--value pairs of the Objects. The
                 Objected-Oriented Bayesian Network (OBN) can
                 effectively infer the cotravel probability of two
                 tourists by calculating the co-occurrence time of
                 feature--value pairs belonging to different kinds of
                 Objects. Based on the relationships mined by OTM or
                 OBN, the recommendation list is generated by the
                 collaborative filtering method. Finally, we evaluate
                 these two models and the ORS framework on real-world
                 travel package data, and the experimental results show
                 that the ORS framework is more flexible in terms of
                 incorporating additional context information, and thus
                 leads to better performances for travel package
                 recommendations. Meanwhile, for feature selection in
                 ORS, we define the feature information entropy, and the
                 experimental results demonstrate that using features
                 with lower entropies usually leads to better
                 recommendation results.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "43",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% ACM TIST 5(3), article 44, September 2014; DOI 10.1145/2542666.
@Article{Gurung:2014:TIP,
  author =       "Sashi Gurung and Dan Lin and Wei Jiang and Ali Hurson
                 and Rui Zhang",
  title =        "Traffic Information Publication with Privacy
                 Preservation",
  journal =      j-TIST,
  volume =       "5",
  number =       "3",
  pages =        "44:1--44:??",
  month =        sep,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542666",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:08 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "We are experiencing the expanding use of
                 location-based services such as AT\&T's TeleNav GPS
                 Navigator and Intel's Thing Finder. Existing
                 location-based services have collected a large amount
                 of location data, which has great potential for
                 statistical usage in applications like traffic flow
                 analysis, infrastructure planning, and advertisement
                 dissemination. The key challenge is how to wisely use
                 the data without violating each user's location privacy
                 concerns. In this article, we first identify a new
                 privacy problem, namely, the inference-route problem,
                 and then present our anonymization algorithms for
                 privacy-preserving trajectory publishing. The
                 experimental results have demonstrated that our
                 approach outperforms the latest related work in terms
                 of both efficiency and effectiveness.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "44",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% ACM TIST 5(3), article 45, September 2014; DOI 10.1145/2542668.
@Article{Hsieh:2014:MRT,
  author =       "Hsun-Ping Hsieh and Cheng-Te Li and Shou-De Lin",
  title =        "Measuring and Recommending Time-Sensitive Routes from
                 Location-Based Data",
  journal =      j-TIST,
  volume =       "5",
  number =       "3",
  pages =        "45:1--45:??",
  month =        sep,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542668",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:08 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Location-based services allow users to perform
                 geospatial recording actions, which facilitates the
                 mining of the moving activities of human beings. This
                 article proposes to recommend time-sensitive trip
                 routes consisting of a sequence of locations with
                 associated timestamps based on knowledge extracted from
                 large-scale timestamped location sequence data (e.g.,
                 check-ins and GPS traces). We argue that a good route
                 should consider (a) the popularity of places, (b) the
                 visiting order of places, (c) the proper visiting time
                 of each place, and (d) the proper transit time from one
                 place to another. By devising a statistical model, we
                 integrate these four factors into a route goodness
                 function that aims to measure the quality of a route.
                 Equipped with the route goodness, we recommend
                 time-sensitive routes for two scenarios. The first is
                 about constructing the route based on the
                 user-specified source location with the starting time.
                 The second is about composing the route between the
                 specified source location and the destination location
                 given a starting time. To handle these queries, we
                 propose a search method, Guidance Search, which
                 consists of a novel heuristic satisfaction function
                 that guides the search toward the destination location
                 and a backward checking mechanism to boost the
                 effectiveness of the constructed route. Experiments on
                 the Gowalla check-in datasets demonstrate the
                 effectiveness of our model on detecting real routes and
                 performing cloze test of routes, comparing with other
                 baseline methods. We also develop a system TripRouter
                 as a real-time demo platform.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "45",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% ACM TIST 5(3), article 46, September 2014; DOI 10.1145/2566617.
%%% Title braces fixed: ``{New York City}'' protected as a unit (the old
%%% ``New {York} City'' let sentence-casing styles produce ``new York
%%% City''), and the proper noun ``{Foursquare}'' protected to match the
%%% file's convention ({Twitter}, {Blau}, {Web}).  The abstract's em
%%% dash, garbled to a bare hyphen on extraction, is restored as ---.
@Article{Joseph:2014:CIB,
  author =       "Kenneth Joseph and Kathleen M. Carley and Jason I.
                 Hong",
  title =        "Check-ins in {``Blau Space''}: Applying {Blau}'s
                 Macrosociological Theory to {Foursquare} Check-ins from
                 {New York City}",
  journal =      j-TIST,
  volume =       "5",
  number =       "3",
  pages =        "46:1--46:??",
  month =        sep,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2566617",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:08 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Peter Blau was one of the first to define a latent
                 social space and utilize it to provide concrete
                 hypotheses. Blau defines social structure via social
                 ``parameters'' (constraints). Actors that are closer
                 together (more homogeneous) in this social parameter
                 space are more likely to interact. One of Blau's most
                 important hypotheses resulting from this work was that
                 the consolidation of parameters could lead to isolated
                 social groups. For example, the consolidation of race
                 and income might lead to segregation. In the present
                 work, we use Foursquare data from New York City to
                 explore evidence of homogeneity along certain social
                 parameters and consolidation that breeds social
                 isolation in communities of locations checked in to by
                 similar users. More specifically, we first test the
                 extent to which communities detected via Latent
                 Dirichlet Allocation are homogeneous across a set of
                 four social constraints --- racial homophily, income
                 homophily, personal interest homophily and physical
                 space. Using a bootstrapping approach, we find that 14
                 (of 20) communities are statistically, and all but one
                 qualitatively, homogeneous along one of these social
                 constraints, showing the relevance of Blau's latent
                 space model in venue communities determined via user
                 check-in behavior. We then consider the extent to which
                 communities with consolidated parameters, those
                 homogeneous on more than one parameter, represent
                 socially isolated populations. We find communities
                 homogeneous on multiple parameters, including a
                 homosexual community and a ``hipster'' community, that
                 show support for Blau's hypothesis that consolidation
                 breeds social isolation. We consider these results in
                 the context of mediated communication, in particular in
                 the context of self-representation on social media.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "46",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% ACM TIST 5(3), article 47, September 2014; DOI 10.1145/2528548.
@Article{Mahmud:2014:HLI,
  author =       "Jalal Mahmud and Jeffrey Nichols and Clemens Drews",
  title =        "Home Location Identification of {Twitter} Users",
  journal =      j-TIST,
  volume =       "5",
  number =       "3",
  pages =        "47:1--47:??",
  month =        sep,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2528548",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:08 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "We present a new algorithm for inferring the home
                 location of Twitter users at different granularities,
                 including city, state, time zone, or geographic region,
                 using the content of users' tweets and their tweeting
                 behavior. Unlike existing approaches, our algorithm
                 uses an ensemble of statistical and heuristic
                 classifiers to predict locations and makes use of a
                 geographic gazetteer dictionary to identify place-name
                 entities. We find that a hierarchical classification
                 approach, where time zone, state, or geographic region
                 is predicted first and city is predicted next, can
                 improve prediction accuracy. We have also analyzed
                 movement variations of Twitter users, built a
                 classifier to predict whether a user was travelling in
                 a certain period of time, and use that to further
                 improve the location detection accuracy. Experimental
                 evidence suggests that our algorithm works well in
                 practice and outperforms the best existing algorithms
                 for predicting the home location of Twitter users.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "47",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% ACM TIST 5(3), article 48, September 2014; DOI 10.1145/2535912.
@Article{Neviarouskaya:2014:IIT,
  author =       "Alena Neviarouskaya and Masaki Aono and Helmut
                 Prendinger and Mitsuru Ishizuka",
  title =        "Intelligent Interface for Textual Attitude Analysis",
  journal =      j-TIST,
  volume =       "5",
  number =       "3",
  pages =        "48:1--48:??",
  month =        sep,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2535912",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:08 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "This article describes a novel intelligent interface
                 for attitude sensing in text driven by a robust
                 computational tool for the analysis of fine-grained
                 attitudes (emotions, judgments, and appreciations)
                 expressed in text. The module responsible for textual
                 attitude analysis was developed using a compositional
                 linguistic approach based on the attitude-conveying
                 lexicon, the analysis of syntactic and dependency
                 relations between words in a sentence, the
                 compositionality principle applied at various
                 grammatical levels, the rules elaborated for
                 semantically distinct verb classes, and a method
                 considering the hierarchy of concepts. The performance
                 of this module was evaluated on sentences from personal
                 stories about life experiences. The developed web-based
                 interface supports recognition of nine emotions,
                 positive and negative judgments, and positive and
                 negative appreciations conveyed in text. It allows
                 users to adjust parameters, to enable or disable
                 various functionality components of the algorithm, and
                 to select the format of text annotation and attitude
                 statistics visualization.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "48",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% ACM TIST 5(3), article 49; DOI 10.1145/2533989.
%%% NOTE(review): month = jul disagrees with the sibling vol. 5 no. 3
%%% entries (articles 41--48 use sep, with a later bibdate); confirm
%%% the issue cover date against the publisher before changing.
@Article{Song:2014:UGF,
  author =       "Yicheng Song and Yongdong Zhang and Juan Cao and
                 Jinhui Tang and Xingyu Gao and Jintao Li",
  title =        "A Unified Geolocation Framework for {Web} Videos",
  journal =      j-TIST,
  volume =       "5",
  number =       "3",
  pages =        "49:1--49:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2533989",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Jul 18 14:11:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In this article, we propose a unified geolocation
                 framework to automatically determine where on the earth
                 a web video was shot. We analyze different social,
                 visual, and textual relationships from a real-world
                 dataset and find four relationships with apparent
                 geography clues that can be used for web video
                 geolocation. Then, the geolocation process is
                 formulated as an optimization problem that
                 simultaneously takes the social, visual, and textual
                 relationships into consideration. The optimization
                 problem is solved by an iterative procedure, which can
                 be interpreted as a propagation of the geography
                 information among the web video social network.
                 Extensive experiments on a real-world dataset clearly
                 demonstrate the effectiveness of our proposed
                 framework, with the geolocation accuracy higher than
                 state-of-the-art approaches.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "49",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% ACM TIST 5(3), article 50; DOI 10.1145/2532439.
%%% NOTE(review): month = jul disagrees with the sibling vol. 5 no. 3
%%% entries (articles 41--48 use sep, with a later bibdate); confirm
%%% the issue cover date against the publisher before changing.
@Article{Zhao:2014:PRL,
  author =       "Yi-Liang Zhao and Liqiang Nie and Xiangyu Wang and
                 Tat-Seng Chua",
  title =        "Personalized Recommendations of Locally Interesting
                 Venues to Tourists via Cross-Region Community
                 Matching",
  journal =      j-TIST,
  volume =       "5",
  number =       "3",
  pages =        "50:1--50:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2532439",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Jul 18 14:11:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "You are in a new city. You are not familiar with the
                 places and neighborhoods. You want to know all about
                 the exciting sights, food outlets, and cultural venues
                 that the locals frequent, in particular those that suit
                 your personal interests. Even though there exist many
                 mapping, local search, and travel assistance sites,
                 they mostly provide popular and famous listings such as
                 Statue of Liberty and Eiffel Tower, which are
                 well-known places but may not suit your personal needs
                 or interests. Therefore, there is a gap between what
                 tourists want and what dominant tourism resources are
                 providing. In this work, we seek to provide a solution
                 to bridge this gap by exploiting the rich
                 user-generated location contents in location-based
                 social networks in order to offer tourists the most
                 relevant and personalized local venue recommendations.
                 In particular, we first propose a novel Bayesian
                 approach to extract the social dimensions of people at
                 different geographical regions to capture their latent
                 local interests. We next mine the local interest
                 communities in each geographical region. We then
                 represent each local community using aggregated
                 behaviors of community members. Finally, we correlate
                 communities across different regions and generate venue
                 recommendations to tourists via cross-region community
                 matching. We have sampled a representative subset of
                 check-ins from Foursquare and experimentally verified
                 the effectiveness of our proposed approaches.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "50",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% ACM TIST 5(3), article 51; DOI 10.1145/2542048.
%%% NOTE(review): month = jul disagrees with the sibling vol. 5 no. 3
%%% entries (articles 41--48 use sep, with a later bibdate); confirm
%%% the issue cover date against the publisher before changing.
%%% (Lowercase ``a'' after the colon in the title is this file's
%%% deliberate house style, not an error.)
@Article{Wang:2014:VNF,
  author =       "Shuaiqiang Wang and Jiankai Sun and Byron J. Gao and
                 Jun Ma",
  title =        "{VSRank}: a Novel Framework for Ranking-Based
                 Collaborative Filtering",
  journal =      j-TIST,
  volume =       "5",
  number =       "3",
  pages =        "51:1--51:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542048",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Jul 18 14:11:13 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Collaborative filtering (CF) is an effective technique
                 addressing the information overload problem. CF
                 approaches generally fall into two categories: rating
                 based and ranking based. The former makes
                 recommendations based on historical rating scores of
                 items and the latter based on their rankings.
                 Ranking-based CF has demonstrated advantages in
                 recommendation accuracy, being able to capture the
                 preference similarity between users even if their
                 rating scores differ significantly. In this study, we
                 propose VSRank, a novel framework that seeks accuracy
                 improvement of ranking-based CF through adaptation of
                 the vector space model. In VSRank, we consider each
                 user as a document and his or her pairwise relative
                 preferences as terms. We then use a novel
                 degree-specialty weighting scheme resembling TF-IDF to
                 weight the terms. Extensive experiments on benchmarks
                 in comparison with the state-of-the-art approaches
                 demonstrate the promise of our approach.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "51",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% ACM TIST 5(4), article 52, January 2015; DOI 10.1145/2668113.
%%% Guest editors' introduction to the special issue; no abstract is
%%% published for this editorial, so none is recorded here.
@Article{Castells:2015:ISI,
  author =       "Pablo Castells and Jun Wang and Rub{\'e}n Lara and
                 Dell Zhang",
  title =        "Introduction to the Special Issue on Diversity and
                 Discovery in Recommender Systems",
  journal =      j-TIST,
  volume =       "5",
  number =       "4",
  pages =        "52:1--52:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2668113",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:09 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "52",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

%%% ACM TIST 5(4), article 53, January 2015; DOI 10.1145/2629350.
%%% The abstract's em dash, garbled to a bare hyphen on extraction
%%% (``Pareto efficiency-a state''), is restored as --- to match the
%%% dash style used elsewhere in this file's abstracts.
@Article{Ribeiro:2015:MPE,
  author =       "Marco Tulio Ribeiro and Nivio Ziviani and Edleno
                 {Silva De Moura} and Itamar Hata and Anisio Lacerda and
                 Adriano Veloso",
  title =        "Multiobjective {Pareto}-Efficient Approaches for
                 Recommender Systems",
  journal =      j-TIST,
  volume =       "5",
  number =       "4",
  pages =        "53:1--53:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2629350",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:09 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Recommender systems are quickly becoming ubiquitous in
                 applications such as e-commerce, social media channels,
                 and content providers, among others, acting as an
                 enabling mechanism designed to overcome the information
                 overload problem by improving browsing and consumption
                 experience. A typical task in many recommender systems
                 is to output a ranked list of items, so that items
                 placed higher in the rank are more likely to be
                 interesting to the users. Interestingness measures
                 include how accurate, novel, and diverse are the
                 suggested items, and the objective is usually to
                 produce ranked lists optimizing one of these measures.
                 Suggesting items that are simultaneously accurate,
                 novel, and diverse is much more challenging, since this
                 may lead to a conflicting-objective problem, in which
                 the attempt to improve a measure further may result in
                 worsening other measures. In this article, we propose
                 new approaches for multiobjective recommender systems
                 based on the concept of Pareto efficiency --- a state
                 achieved when the system is devised in the most
                 efficient manner in the sense that there is no way to
                 improve one of the objectives without making any other
                 objective worse off. Given that existing multiobjective
                 recommendation algorithms differ in their level of
                 accuracy, diversity, and novelty, we exploit the
                 Pareto-efficiency concept in two distinct manners: (i)
                 the aggregation of ranked lists produced by existing
                 algorithms into a single one, which we call
                 Pareto-efficient ranking, and (ii) the weighted
                 combination of existing algorithms resulting in a
                 hybrid one, which we call Pareto-efficient
                 hybridization. Our evaluation involves two real
                 application scenarios: music recommendation with
                 implicit feedback (i.e., Last.fm) and movie
                 recommendation with explicit feedback (i.e.,
                 MovieLens). We show that the proposed Pareto-efficient
                 approaches are effective in suggesting items that are
                 likely to be simultaneously accurate, diverse, and
                 novel. We discuss scenarios where the system achieves
                 high levels of diversity and novelty without
                 compromising its accuracy. Further, comparison against
                 multiobjective baselines reveals improvements in terms
                 of accuracy (from 10.4\% to 10.9\%), novelty (from
                 5.7\% to 7.5\%), and diversity (from 1.6\% to 4.2\%).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "53",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Adamopoulos:2015:URS,
  author =       "Panagiotis Adamopoulos and Alexander Tuzhilin",
  title =        "On Unexpectedness in Recommender Systems: Or How to
                 Better Expect the Unexpected",
  journal =      j-TIST,
  volume =       "5",
  number =       "4",
  pages =        "54:1--54:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2559952",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:09 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Although the broad social and business success of
                 recommender systems has been achieved across several
                 domains, there is still a long way to go in terms of
                 user satisfaction. One of the key dimensions for
                 significant improvement is the concept of
                 unexpectedness. In this article, we propose a method to
                 improve user satisfaction by generating unexpected
                 recommendations based on the utility theory of
                 economics. In particular, we propose a new concept of
                 unexpectedness as recommending to users those items
                 that depart from what they would expect from the system
                 --- the consideration set of each user. We define and
                 formalize the concept of unexpectedness and discuss how
                 it differs from the related notions of novelty,
                 serendipity, and diversity. In addition, we suggest
                 several mechanisms for specifying the users'
                 expectations and propose specific performance metrics
                 to measure the unexpectedness of recommendation lists.
                 We also take into consideration the quality of
                 recommendations using certain utility functions and
                 present an algorithm for providing users with
                 unexpected recommendations of high quality that are
                 hard to discover but fairly match their interests.
                 Finally, we conduct several experiments on
                 ``real-world'' datasets and compare our recommendation
                 results with other methods. The proposed approach
                 outperforms these baseline methods in terms of
                 unexpectedness and other important metrics, such as
                 coverage, aggregate diversity and dispersion, while
                 avoiding any accuracy loss.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "54",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Kucuktunc:2015:DCR,
  author =       "Onur K{\"u}{\c{c}}{\"u}ktun{\c{c}} and Erik Saule and
                 Kamer Kaya and {\"U}mit V. {\c{C}}ataly{\"u}rek",
  title =        "Diversifying Citation Recommendations",
  journal =      j-TIST,
  volume =       "5",
  number =       "4",
  pages =        "55:1--55:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2668106",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:09 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Literature search is one of the most important steps
                 of academic research. With more than 100,000 papers
                 published each year just in computer science,
                 performing a complete literature search becomes a
                 Herculean task. Some of the existing approaches and
                 tools for literature search cannot compete with the
                 characteristics of today's literature, and they suffer
                 from ambiguity and homonymy. Techniques based on
                 citation information are more robust to the mentioned
                 issues. Thus, we recently built a Web service called
                 the advisor, which provides personalized
                 recommendations to researchers based on their papers of
                 interest. Since most recommendation methods may return
                 redundant results, diversifying the results of the
                 search process is necessary to increase the amount of
                 information that one can reach via an automated search.
                 This article targets the problem of result
                 diversification in citation-based bibliographic search,
                 assuming that the citation graph itself is the only
                 information available and no categories or intents are
                 known. The contribution of this work is threefold. We
                 survey various random walk--based diversification
                 methods and enhance them with the direction awareness
                 property to allow users to reach either old,
                 foundational (possibly well-cited and well-known)
                 research papers or recent (most likely less-known)
                 ones. Next, we propose a set of novel algorithms based
                 on vertex selection and query refinement. A set of
                 experiments with various evaluation criteria shows that
                 the proposed $ \gamma $-RLM algorithm performs better
                 than the existing approaches and is suitable for
                 real-time bibliographic search in practice.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "55",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Javari:2015:ANR,
  author =       "Amin Javari and Mahdi Jalili",
  title =        "Accurate and Novel Recommendations: an Algorithm Based
                 on Popularity Forecasting",
  journal =      j-TIST,
  volume =       "5",
  number =       "4",
  pages =        "56:1--56:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2668107",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:09 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Recommender systems are in the center of network
                 science, and they are becoming increasingly important
                 in individual businesses for providing efficient,
                 personalized services and products to users. Previous
                 research in the field of recommendation systems focused
                 on improving the precision of the system through
                 designing more accurate recommendation lists. Recently,
                 the community has been paying attention to diversity
                 and novelty of recommendation lists as key
                 characteristics of modern recommender systems. In many
                 cases, novelty and precision do not go hand in hand,
                 and the accuracy--novelty dilemma is one of the
                 challenging problems in recommender systems, which
                 needs efforts in making a trade-off between them. In
                 this work, we propose an algorithm for providing novel
                 and accurate recommendation to users. We consider the
                 standard definition of accuracy and an effective
                 self-information--based measure to assess novelty of
                 the recommendation list. The proposed algorithm is
                 based on item popularity, which is defined as the
                 number of votes received in a certain time interval.
                 Wavelet transform is used for analyzing popularity time
                 series and forecasting their trend in future timesteps.
                 We introduce two filtering algorithms based on the
                 information extracted from analyzing popularity time
                 series of the items. The popularity-based filtering
                 algorithm gives a higher chance to items that are
                 predicted to be popular in future timesteps. The other
                 algorithm, denoted as a novelty and popularity-based
                 filtering algorithm, is to move toward items with low
                 popularity in past timesteps that are predicted to
                 become popular in the future. The introduced filters
                 can be applied as add-ons to any recommendation
                 algorithm. In this article, we use the proposed
                 algorithms to improve the performance of classic
                 recommenders, including item-based collaborative
                 filtering and Markov-based recommender systems. The
                 experiments show that the algorithms could
                 significantly improve both the accuracy and effective
                 novelty of the classic recommenders.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "56",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Shen:2015:ISI,
  author =       "Dou Shen and Deepak Agarwal",
  title =        "Introduction to the Special Issue on Online
                 Advertising",
  journal =      j-TIST,
  volume =       "5",
  number =       "4",
  pages =        "57:1--57:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2668123",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:09 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "57",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhu:2015:MMU,
  author =       "Hengshu Zhu and Enhong Chen and Hui Xiong and Kuifei
                 Yu and Huanhuan Cao and Jilei Tian",
  title =        "Mining Mobile User Preferences for Personalized
                 Context-Aware Recommendation",
  journal =      j-TIST,
  volume =       "5",
  number =       "4",
  pages =        "58:1--58:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2532515",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:09 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Recent advances in mobile devices and their sensing
                 capabilities have enabled the collection of rich
                 contextual information and mobile device usage records
                 through the device logs. These context-rich logs open a
                 venue for mining the personal preferences of mobile
                 users under varying contexts and thus enabling the
                 development of personalized context-aware
                 recommendation and other related services, such as
                 mobile online advertising. In this article, we
                 illustrate how to extract personal context-aware
                 preferences from the context-rich device logs, or
                 context logs for short, and exploit these identified
                 preferences for building personalized context-aware
                 recommender systems. A critical challenge along this
                 line is that the context log of each individual user
                 may not contain sufficient data for mining his or her
                 context-aware preferences. Therefore, we propose to
                 first learn common context-aware preferences from the
                 context logs of many users. Then, the preference of
                 each user can be represented as a distribution of these
                 common context-aware preferences. Specifically, we
                 develop two approaches for mining common context-aware
                 preferences based on two different assumptions, namely,
                 context-independent and context-dependent assumptions,
                 which can fit into different application scenarios.
                 Finally, extensive experiments on a real-world dataset
                 show that both approaches are effective and outperform
                 baselines with respect to mining personal context-aware
                 preferences for mobile users.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "58",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Ashkan:2015:LQA,
  author =       "Azin Ashkan and Charles L. A. Clarke",
  title =        "Location- and Query-Aware Modeling of Browsing and
                 Click Behavior in Sponsored Search",
  journal =      j-TIST,
  volume =       "5",
  number =       "4",
  pages =        "59:1--59:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2534398",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:09 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "An online advertisement's clickthrough rate provides a
                 fundamental measure of its quality, which is widely
                 used in ad selection strategies. Unfortunately, ads
                 placed in contexts where they are rarely viewed ---
                 or where users are unlikely to be interested in
                 commercial results --- may receive few clicks
                 regardless of their
                 quality. In this article, we model the variability of a
                 user's browsing behavior for the purpose of click
                 analysis and prediction in sponsored search. Our model
                 incorporates several important contextual factors that
                 influence ad clickthrough rates, including the user's
                 query and ad placement on search engine result pages.
                 We formally model these factors with respect to the
                 list of ads displayed on a result page, the probability
                 that the user will initiate browsing of this list, and
                 the persistence of the user in browsing the list. We
                 incorporate these factors into existing click models by
                 augmenting them with appropriate query and location
                 biases. Using expectation maximization, we learn the
                 parameters of these augmented models from click signals
                 recorded in the logs of a commercial search engine. To
                 evaluate the performance of the models and to compare
                 them with state-of-the-art performance, we apply
                 standard evaluation metrics, including log-likelihood
                 and perplexity. Our evaluation results indicate that,
                 through the incorporation of query and location biases,
                 significant improvements can be achieved in predicting
                 browsing and click behavior in sponsored search. In
                 addition, we explore the extent to which these biases
                 actually reflect varying behavioral patterns. Our
                 observations confirm that correlations exist between
                 the biases and user search behavior.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "59",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Qin:2015:SSA,
  author =       "Tao Qin and Wei Chen and Tie-Yan Liu",
  title =        "Sponsored Search Auctions: Recent Advances and Future
                 Directions",
  journal =      j-TIST,
  volume =       "5",
  number =       "4",
  pages =        "60:1--60:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2668108",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:09 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Sponsored search has been proven to be a successful
                 business model, and sponsored search auctions have
                 become a hot research direction. There have been many
                 exciting advances in this field, especially in recent
                 years, while at the same time, there are also many open
                 problems waiting for us to resolve. In this article, we
                 provide a comprehensive review of sponsored search
                 auctions in hopes of helping both industry
                 practitioners and academic researchers to become
                 familiar with this field, to know the state of the art,
                 and to identify future research topics. Specifically,
                 we organize the article into two parts. In the first
                 part, we review research works on sponsored search
                 auctions with basic settings, where fully rational
                 advertisers without budget constraints, preknown
                 click-through rates (CTRs) without interdependence, and
                 exact match between queries and keywords are assumed.
                 Under these assumptions, we first introduce the
                 generalized second price (GSP) auction, which is the
                 most popularly used auction mechanism in the industry.
                 Then we give the definitions of several well-studied
                 equilibria and review the latest results on GSP's
                 efficiency and revenue in these equilibria. In the
                 second part, we introduce some advanced topics on
                 sponsored search auctions. In these advanced topics,
                 one or more assumptions made in the basic settings are
                 relaxed. For example, the CTR of an ad could be unknown
                 and dependent on other ads; keywords could be broadly
                 matched to queries before auctions are executed; and
                 advertisers are not necessarily fully rational, could
                 have budget constraints, and may prefer rich bidding
                 languages. Given that the research on these advanced
                 topics is still immature, in each section of the second
                 part, we provide our opinions on how to make further
                 advances, in addition to describing what has been done
                 by researchers in the corresponding direction.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "60",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Chapelle:2015:SSR,
  author =       "Olivier Chapelle and Eren Manavoglu and Romer
                 Rosales",
  title =        "Simple and Scalable Response Prediction for Display
                 Advertising",
  journal =      j-TIST,
  volume =       "5",
  number =       "4",
  pages =        "61:1--61:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2532128",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:09 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Clickthrough and conversion rates estimation are two
                 core prediction tasks in display advertising. We
                 present in this article a machine learning framework
                 based on logistic regression that is specifically
                 designed to tackle the specifics of display
                 advertising. The resulting system has the following
                 characteristics: It is easy to implement and deploy, it
                 is highly scalable (we have trained it on terabytes of
                 data), and it provides models with state-of-the-art
                 accuracy.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "61",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Balakrishnan:2015:RTB,
  author =       "Raju Balakrishnan and Rushi P. Bhatt",
  title =        "Real-Time Bid Optimization for Group-Buying Ads",
  journal =      j-TIST,
  volume =       "5",
  number =       "4",
  pages =        "62:1--62:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2532441",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:09 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Group-buying ads seeking a minimum number of customers
                 before the deal expiry are increasingly used by
                 daily-deal providers. Unlike traditional web ads, the
                 advertiser's profits for group-buying ads depend on the
                 time to expiry and additional customers needed to
                 satisfy the minimum group size. Since both these
                 quantities are time-dependent, optimal bid amounts to
                 maximize profits change with every impression.
                 Consequently, traditional static bidding strategies are
                 far from optimal. Instead, bid values need to be
                 optimized in real-time to maximize expected bidder
                 profits. This online optimization of deal profits is
                 made possible by the advent of ad exchanges offering
                 real-time (spot) bidding. To this end, we propose a
                 real-time bidding strategy for group-buying deals based
                 on the online optimization of bid values. We derive the
                 expected bidder profit of deals as a function of the
                 bid amounts and dynamically vary the bids to maximize
                 profits. Furthermore, to satisfy time constraints of
                 the online bidding, we present methods of minimizing
                 computation timings. Subsequently, we derive the
                 real-time ad selection, admissibility, and real-time
                 bidding of the traditional ads as the special cases of
                 the proposed method. We evaluate the proposed bidding,
                 selection, and admission strategies on a multimillion
                 click stream of 935 ads. The proposed real-time
                 bidding, selection, and admissibility show significant
                 profit increases over the existing strategies. Further
                 experiments illustrate the robustness of the bidding
                 and acceptable computation timings.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "62",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Liu:2015:IAC,
  author =       "Qingzhong Liu and Zhongxue Chen",
  title =        "Improved Approaches with Calibrated Neighboring Joint
                 Density to Steganalysis and Seam-Carved Forgery
                 Detection in {JPEG} Images",
  journal =      j-TIST,
  volume =       "5",
  number =       "4",
  pages =        "63:1--63:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2560365",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:09 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Steganalysis and forgery detection in image forensics
                 are generally investigated separately. We have designed
                 a method targeting the detection of both steganography
                 and seam-carved forgery in JPEG images. We analyze the
                 neighboring joint density of the DCT coefficients and
                 reveal the difference between the untouched image and
                 the modified version. In realistic detection, the
                 untouched image and the modified version may not be
                 obtained at the same time, and different JPEG images
                 may have different neighboring joint density features.
                 By exploring the self-calibration under different shift
                 recompressions, we propose calibrated neighboring joint
                 density-based approaches with a simple feature set to
                 distinguish steganograms and tampered images from
                 untouched ones. Our study shows that this approach has
                 multiple promising applications in image forensics.
                 Compared to the state-of-the-art steganalysis
                 detectors, our approach delivers better or comparable
                 detection performances with a much smaller feature set
                 while detecting several JPEG-based steganographic
                 systems including DCT-embedding-based adaptive
                 steganography and Yet Another Steganographic Scheme
                 (YASS). Our approach is also effective in detecting
                 seam-carved forgery in JPEG images. By integrating
                 calibrated neighboring density with spatial domain rich
                 models that were originally designed for steganalysis,
                 the hybrid approach obtains the best detection accuracy
                 to discriminate seam-carved forgery from an untouched
                 image. Our study also offers a promising manner to
                 explore steganalysis and forgery detection together.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "63",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Azaria:2015:SID,
  author =       "Amos Azaria and Zinovi Rabinovich and Claudia V.
                 Goldman and Sarit Kraus",
  title =        "Strategic Information Disclosure to People with
                 Multiple Alternatives",
  journal =      j-TIST,
  volume =       "5",
  number =       "4",
  pages =        "64:1--64:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2558397",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:09 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In this article, we study automated agents that are
                 designed to encourage humans to take some actions over
                 others by strategically disclosing key pieces of
                 information. To this end, we utilize the framework of
                 persuasion games --- a branch of game theory that
                 deals
                 with asymmetric interactions where one player (Sender)
                 possesses more information about the world, but it is
                 only the other player (Receiver) who can take an
                 action. In particular, we use an extended persuasion
                 model, where the Sender's information is imperfect and
                 the Receiver has more than two alternative actions
                 available. We design a computational algorithm that,
                 from the Sender's standpoint, calculates the optimal
                 information disclosure rule. The algorithm is
                 parameterized by the Receiver's decision model (i.e.,
                 what choice he will make based on the information
                 disclosed by the Sender) and can be retuned
                 accordingly. We then provide an extensive experimental
                 study of the algorithm's performance in interactions
                 with human Receivers. First, we consider a fully
                 rational (in the Bayesian sense) Receiver decision
                 model and experimentally show the efficacy of the
                 resulting Sender's solution in a routing domain.
                 Despite the discrepancy in the Sender's and the
                 Receiver's utilities from each of the Receiver's
                 choices, our Sender agent successfully persuaded human
                 Receivers to select an option more beneficial for the
                 agent. Dropping the Receiver's rationality assumption,
                 we introduce a machine learning procedure that
                 generates a more realistic human Receiver model. We
                 then show its significant benefit to the Sender
                 solution by repeating our routing experiment. To
                 complete our study, we introduce a second
                 (supply--demand) experimental domain and, by
                 contrasting it with the routing domain, obtain general
                 guidelines for a Sender on how to construct a Receiver
                 model.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "64",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Liu:2015:SPA,
  author =       "Si Liu and Qiang Chen and Shuicheng Yan and Changsheng
                 Xu and Hanqing Lu",
  title =        "{Snap \& Play}: Auto-Generated Personalized
                 Find-the-Difference Game",
  journal =      j-TIST,
  volume =       "5",
  number =       "4",
  pages =        "65:1--65:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2668109",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:09 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In this article, by taking a popular game, the
                 Find-the-Difference (FiDi) game, as a concrete example,
                 we explore how state-of-the-art image processing
                 techniques can assist in developing a personalized,
                 automatic, and dynamic game. Unlike the traditional
                 FiDi game, where image pairs (source image and target
                 image) with five different patches are manually
                 produced by professional game developers, the proposed
                 Personalized FiDi (P-FiDi) electronic game can be
                 played in a fully automatic Snap \& Play mode. Snap
                 means that players first take photos with their digital
                 cameras. The newly captured photos are used as source
                 images and fed into the P-FiDi system to autogenerate
                 the counterpart target images for users to play. Four
                 steps are adopted to autogenerate target images:
                 enhancing the visual quality of source images,
                 extracting some changeable patches from the source
                 image, selecting the most suitable combination of
                 changeable patches and difference styles for the image,
                 and generating the differences on the target image with
                 state-of-the-art image processing techniques. In
                 addition, the P-FiDi game can be easily redesigned for
                 the in-game advertising. Extensive experiments show
                 that the P-FiDi electronic game is satisfying in terms
                 of player experience, seamless advertisement, and
                 technical feasibility.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "65",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Reches:2015:CCU,
  author =       "Shulamit Reches and Meir Kalech",
  title =        "Choosing a Candidate Using Efficient Allocation of
                 Biased Information",
  journal =      j-TIST,
  volume =       "5",
  number =       "4",
  pages =        "66:1--66:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2558327",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:09 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "This article deals with a decision-making problem
                 concerning an agent who wants to choose a partner from
                 multiple candidates for long-term collaboration. To
                 choose the best partner, the agent can rely on prior
                 information he knows about the candidates. However, to
                 improve his decision, he can request additional
                 information from information sources. Nonetheless,
                 acquiring information from external information sources
                 about candidates may be biased due to different
                 personalities of the agent searching for a partner and
                 the information source. In addition, information may be
                 costly. Considering the bias and the cost of the
                 information sources, the optimization problem addressed
                 in this article is threefold: (1) determining the
                 necessary amount of additional information, (2)
                 selecting information sources from which to request the
                 information, and (3) choosing the candidates on whom to
                 request the additional information. We propose a
                 heuristic to solve this optimization problem. The
                 results of experiments on simulated and real-world
                 domains demonstrate the efficiency of our algorithm.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "66",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhuang:2015:CDS,
  author =       "Jinfeng Zhuang and Tao Mei and Steven C. H. Hoi and
                 Xian-Sheng Hua and Yongdong Zhang",
  title =        "Community Discovery from Social Media by Low-Rank
                 Matrix Recovery",
  journal =      j-TIST,
  volume =       "5",
  number =       "4",
  pages =        "67:1--67:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2668110",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:09 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The pervasive usage and reach of social media have
                 attracted a surge of attention in the multimedia
                 research community. Community discovery from social
                 media has therefore become an important yet challenging
                 issue. However, due to the subjective generating
                 process, the explicitly observed communities (e.g.,
                 group-user and user-user relationship) are often noisy
                 and incomplete in nature. This paper presents a novel
                 approach to discovering communities from social media,
                 including the group membership and user friend
                 structure, by exploring a low-rank matrix recovery
                 technique. In particular, we take Flickr as one
                 exemplary social media platform. We first model the
                 observed indicator matrix of the Flickr community as a
                 summation of a low-rank true matrix and a sparse error
                 matrix. We then formulate an optimization problem by
                 regularizing the true matrix to coincide with the
                 available rich context and content (i.e., photos and
                 their associated tags). An iterative algorithm is
                 developed to recover the true community indicator
                 matrix. The proposed approach leads to a variety of
                 social applications, including community visualization,
                 interest group refinement, friend suggestion, and
                 influential user identification. The evaluations on a
                 large-scale testbed, consisting of 4,919 Flickr users,
                 1,467 interest groups, and over five million photos,
                 show that our approach opens a new yet effective
                 perspective to solve social network problems with
                 sparse learning technique. Despite being focused on
                 Flickr, our technique can be applied in any other
                 social media community.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "67",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Yang:2015:IPI,
  author =       "Yiyang Yang and Zhiguo Gong and Leong Hou U.",
  title =        "Identifying Points of Interest Using Heterogeneous
                 Features",
  journal =      j-TIST,
  volume =       "5",
  number =       "4",
  pages =        "68:1--68:??",
  month =        jan,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2668111",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Wed Feb 11 12:29:09 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Deducing trip-related information from web-scale
                 datasets has received large amounts of attention
                 recently. Identifying points of interest (POIs) in
                 geo-tagged photos is one of these problems. The problem
                 can be viewed as a standard clustering problem of
                 partitioning two-dimensional objects. In this work, we
                 study spectral clustering, which is the first attempt
                 for the identification of POIs. However, there is no
                 unified approach to assigning the subjective clustering
                 parameters, and these parameters vary immensely in
                 different metropolitans and locations. To address this
                 issue, we study a self-tuning technique that can
                 properly determine the parameters for the clustering
                 needed. Besides geographical information, web photos
                 inherently store other rich information. Such
                 heterogeneous information can be used to enhance the
                 identification accuracy. Thereby, we study a novel
                 refinement framework that is based on the tightness and
                 cohesion degree of the additional information. We
                 thoroughly demonstrate our findings by web-scale
                 datasets collected from Flickr.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "68",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Ji:2015:WLM,
  author =       "Rongrong Ji and Yue Gao and Wei Liu and Xing Xie and
                 Qi Tian and Xuelong Li",
  title =        "When Location Meets Social Multimedia: a Survey on
                 Vision-Based Recognition and Mining for Geo-Social
                 Multimedia Analytics",
  journal =      j-TIST,
  volume =       "6",
  number =       "1",
  pages =        "1:1--1:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2597181",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 27 18:08:08 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Coming with the popularity of multimedia sharing
                 platforms such as Facebook and Flickr, recent years
                 have witnessed an explosive growth of geographical tags
                 on social multimedia content. This trend enables a wide
                 variety of emerging applications, for example, mobile
                 location search, landmark recognition, scene
                 reconstruction, and touristic recommendation, which
                 range from purely research prototype to commercial
                 systems. In this article, we give a comprehensive
                 survey on these applications, covering recent advances
                 in recognition and mining of geographical-aware social
                 multimedia. We review related work in the past decade
                 regarding location recognition, scene summarization,
                 tourism suggestion, 3D building modeling, mobile visual
                 search and city navigation. At the end, we further
                 discuss potential challenges, future topics, as well as
                 open issues related to geo-social multimedia computing,
                 recognition, mining, and analytics.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "1",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Chin:2015:FPS,
  author =       "Wei-Sheng Chin and Yong Zhuang and Yu-Chin Juan and
                 Chih-Jen Lin",
  title =        "A Fast Parallel Stochastic Gradient Method for Matrix
                 Factorization in Shared Memory Systems",
  journal =      j-TIST,
  volume =       "6",
  number =       "1",
  pages =        "2:1--2:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2668133",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 27 18:08:08 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Matrix factorization is known to be an effective
                 method for recommender systems that are given only the
                 ratings from users to items. Currently, stochastic
                 gradient (SG) method is one of the most popular
                 algorithms for matrix factorization. However, as a
                 sequential approach, SG is difficult to be parallelized
                 for handling web-scale problems. In this article, we
                 develop a fast parallel SG method, FPSG, for shared
                 memory systems. By dramatically reducing the cache-miss
                 rate and carefully addressing the load balance of
                 threads, FPSG is more efficient than state-of-the-art
                 parallel algorithms for matrix factorization.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "2",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Feuz:2015:TLA,
  author =       "Kyle D. Feuz and Diane J. Cook",
  title =        "Transfer Learning across Feature-Rich Heterogeneous
                 Feature Spaces via {Feature-Space Remapping (FSR)}",
  journal =      j-TIST,
  volume =       "6",
  number =       "1",
  pages =        "3:1--3:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2629528",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 27 18:08:08 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Transfer learning aims to improve performance on a
                 target task by utilizing previous knowledge learned
                 from source tasks. In this paper we introduce a novel
                 heterogeneous transfer learning technique,
                 Feature-Space Remapping (FSR), which transfers
                 knowledge between domains with different feature
                 spaces. This is accomplished without requiring typical
                 feature-feature, feature-instance, or instance-instance
                 co-occurrence data. Instead we relate features in
                 different feature-spaces through the construction of
                 metafeatures. We show how these techniques can utilize
                 multiple source datasets to construct an ensemble
                 learner which further improves performance. We apply
                 FSR to an activity recognition problem and a document
                 classification problem. The ensemble technique is able
                 to outperform all other baselines and even performs
                 better than a classifier trained using a large amount
                 of labeled data in the target domain. These problems
                 are especially difficult because, in addition to having
                 different feature-spaces, the marginal probability
                 distributions and the class labels are also different.
                 This work extends the state of the art in transfer
                 learning by considering large transfer across
                 dramatically different spaces.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "3",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Patel:2015:DSI,
  author =       "Dhaval Patel",
  title =        "On Discovery of Spatiotemporal Influence-Based Moving
                 Clusters",
  journal =      j-TIST,
  volume =       "6",
  number =       "1",
  pages =        "4:1--4:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2631926",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 27 18:08:08 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "A moving object cluster is a set of objects that move
                 close to each other for a long time interval. Existing
                 works have utilized object trajectories to discover
                 moving object clusters efficiently. In this article, we
                 define a spatiotemporal influence-based moving cluster
                 that captures spatiotemporal influence spread over a
                 set of spatial objects. A spatiotemporal
                 influence-based moving cluster is a sequence of spatial
                 clusters, where each cluster is a set of nearby
                 objects, such that each object in a cluster influences
                 at least one object in the next immediate cluster and
                 is also influenced by an object from the immediate
                 preceding cluster. Real-life examples of spatiotemporal
                 influence-based moving clusters include diffusion of
                 infectious diseases and spread of innovative ideas. We
                 study the discovery of spatiotemporal influence-based
                 moving clusters in a database of spatiotemporal events.
                 While the search space for discovering all
                 spatiotemporal influence-based moving clusters is
                 prohibitively huge, we design a method, STIMer, to
                 efficiently retrieve the maximal answer. The algorithm
                 STIMer adopts a top-down recursive refinement method to
                 generate the maximal spatiotemporal influence-based
                 moving clusters directly. Empirical studies on the real
                 data as well as large synthetic data demonstrate the
                 effectiveness and efficiency of our method.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "4",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Sepehri-Rad:2015:ICW,
  author =       "Hoda Sepehri-Rad and Denilson Barbosa",
  title =        "Identifying Controversial {Wikipedia} Articles Using
                 Editor Collaboration Networks",
  journal =      j-TIST,
  volume =       "6",
  number =       "1",
  pages =        "5:1--5:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2630075",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 27 18:08:08 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Wikipedia is probably the most commonly used knowledge
                 reference nowadays, and the high quality of its
                 articles is widely acknowledged. Nevertheless,
                 disagreement among editors often causes some articles
                 to become controversial over time. These articles span
                 thousands of popular topics, including religion,
                 history, and politics, to name a few, and are manually
                 tagged as controversial by the editors, which is
                 clearly suboptimal. Moreover, disagreement, bias, and
                 conflict are expressed quite differently in Wikipedia
                 compared to other social media, rendering previous
                 approaches ineffective. On the other hand, the social
                 process of editing Wikipedia is partially captured in
                 the edit history of the articles, opening the door for
                 novel approaches. This article describes a novel
                 controversy model that builds on the interaction
                 history of the editors and not only predicts
                 controversy but also sheds light on the process that
                 leads to controversy. The model considers the
                 collaboration history of pairs of editors to predict
                 their attitude toward one another. This is done in a
                 supervised way, where the votes of Wikipedia
                 administrator elections are used as labels indicating
                 agreement (i.e., support vote) or disagreement (i.e.,
                 oppose vote). From each article, a collaboration
                 network is built, capturing the pairwise attitude among
                 editors, allowing the accurate detection of
                 controversy. Extensive experimental results establish
                 the superiority of this approach compared to previous
                 work and very competitive baselines on a wide range of
                 settings.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "5",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Changuel:2015:RSU,
  author =       "Sahar Changuel and Nicolas Labroche and Bernadette
                 Bouchon-Meunier",
  title =        "Resources Sequencing Using Automatic
                 Prerequisite--Outcome Annotation",
  journal =      j-TIST,
  volume =       "6",
  number =       "1",
  pages =        "6:1--6:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2505349",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 27 18:08:08 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The objective of any tutoring system is to provide
                 resources to learners that are adapted to their current
                 state of knowledge. With the availability of a large
                 variety of online content and the disjunctive nature of
                 results provided by traditional search engines, it
                 becomes crucial to provide learners with adapted
                 learning paths that propose a sequence of resources
                 that match their learning objectives. In an ideal case,
                 the sequence of documents provided to the learner
                 should be such that each new document relies on
                 concepts that have been already defined in previous
                 documents. Thus, the problem of determining an
                 effective learning path from a corpus of web documents
                 depends on the accurate identification of outcome and
                 prerequisite concepts in these documents and on their
                 ordering according to this information. Until now, only
                 a few works have been proposed to distinguish between
                 prerequisite and outcome concepts, and to the best of
                 our knowledge, no method has been introduced so far to
                 benefit from this information to produce a meaningful
                 learning path. To this aim, this article first
                 describes a concept annotation method that relies on
                 machine-learning techniques to predict the class of
                 each concept --- prerequisite or outcome --- on the basis of
                 contextual and local features. Then, this
                 categorization is exploited to produce an automatic
                 resource sequencing on the basis of different
                 representations and scoring functions that transcribe
                 the precedence relation between learning resources.
                 Experiments conducted on a real dataset built from
                 online resources show that our concept annotation
                 approach outperforms the baseline method and that the
                 learning paths automatically generated are consistent
                 with the ground truth provided by the author of the
                 online content.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "6",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Ghosh:2015:MTD,
  author =       "Siddhartha Ghosh and Steve Reece and Alex Rogers and
                 Stephen Roberts and Areej Malibari and Nicholas R.
                 Jennings",
  title =        "Modeling the Thermal Dynamics of Buildings: a
                 Latent-Force-Model-Based Approach",
  journal =      j-TIST,
  volume =       "6",
  number =       "1",
  pages =        "7:1--7:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2629674",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 27 18:08:08 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Minimizing the energy consumed by heating,
                 ventilation, and air conditioning (HVAC) systems of
                 residential buildings without impacting occupants'
                 comfort has been highlighted as an important artificial
                 intelligence (AI) challenge. Typically, approaches that
                 seek to address this challenge use a model that
                 captures the thermal dynamics within a building, also
                 referred to as a thermal model. Among thermal models,
                 gray-box models are a popular choice for modeling the
                 thermal dynamics of buildings. They combine knowledge
                 of the physical structure of a building with various
                 data-driven inputs and are accurate estimators of the
                 state (internal temperature). However, existing
                 gray-box models require a detailed specification of all
                 the physical elements that can affect the thermal
                 dynamics of a building a priori. This limits their
                 applicability, particularly in residential buildings,
                 where additional dynamics can be induced by human
                 activities such as cooking, which contributes
                 additional heat, or opening of windows, which leads to
                 additional leakage of heat. Since the incidence of
                 these additional dynamics is rarely known, their
                 combined effects cannot readily be accommodated within
                 existing models. To overcome this limitation and
                 improve the general applicability of gray-box models,
                 we introduce a novel model, which we refer to as a
                 latent force thermal model of the thermal dynamics of a
                 building, or LFM-TM. Our model is derived from an
                 existing gray-box thermal model, which is augmented
                 with an extra term referred to as the learned residual.
                 This term is capable of modeling the effect of any a
                 priori unknown additional dynamic, which, if not
                 captured, appears as a structure in a thermal model's
                 residual (the error induced by the model). More
                 importantly, the learned residual can also capture the
                 effects of physical elements such as a building's
                 envelope or the lags in a heating system, leading to a
                 significant reduction in complexity compared to
                 existing models. To evaluate the performance of LFM-TM,
                 we apply it to two independent data sources. The first
                 is an established dataset, referred to as the FlexHouse
                 data, which was previously used for evaluating the
                 efficacy of existing gray-box models [Bacher and Madsen
                 2011]. The second dataset consists of heating data
                 logged within homes located on the University of
                 Southampton campus, which were specifically
                 instrumented to collect data for our thermal modeling
                 experiments. On both datasets, we show that LFM-TM
                 outperforms existing models in its ability to
                 accurately fit the observed data, generate accurate
                 day-ahead internal temperature predictions, and explain
                 a large amount of the variability in the future
                 observations. This, along with the fact that we also
                 use a corresponding efficient sequential inference
                 scheme for LFM-TM, makes it an ideal candidate for
                 model-based predictive control, where having accurate
                 online predictions of internal temperatures is
                 essential for high-quality solutions.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "7",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2015:SPL,
  author =       "Zhao Zhang and Cheng-Lin Liu and Ming-Bo Zhao",
  title =        "A Sparse Projection and Low-Rank Recovery Framework
                 for Handwriting Representation and Salient Stroke
                 Feature Extraction",
  journal =      j-TIST,
  volume =       "6",
  number =       "1",
  pages =        "9:1--9:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2601408",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 27 18:08:08 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In this article, we consider the problem of
                 simultaneous low-rank recovery and sparse projection.
                 More specifically, a new Robust Principal Component
                 Analysis (RPCA)-based framework called Sparse
                 Projection and Low-Rank Recovery (SPLRR) is proposed
                 for handwriting representation and salient stroke
                 feature extraction. In addition to achieving a low-rank
                 component encoding principal features and identifying
                 errors or missing values from a given data matrix as
                 RPCA, SPLRR also learns a similarity-preserving sparse
                 projection for extracting salient stroke features and
                 embedding new inputs for classification. These
                 properties make SPLRR applicable for handwriting
                 recognition and stroke correction and enable online
                 computation. A cosine-similarity-style regularization
                 term is incorporated into the SPLRR formulation for
                 encoding the similarities of local handwriting
                 features. The sparse projection and low-rank recovery
                 are calculated from a convex minimization problem that
                 can be efficiently solved in polynomial time. Besides,
                 the supervised extension of SPLRR is also elaborated.
                 The effectiveness of our SPLRR is examined by extensive
                 handwritten digital repairing, stroke correction, and
                 recognition based on benchmark problems. Compared with
                 other related techniques, SPLRR delivers strong
                 generalization capability and state-of-the-art
                 performance for handwriting representation and
                 recognition.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "9",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Stapleton:2015:CST,
  author =       "Gem Stapleton and Beryl Plimmer and Aidan Delaney and
                 Peter Rodgers",
  title =        "Combining Sketching and Traditional Diagram Editing
                 Tools",
  journal =      j-TIST,
  volume =       "6",
  number =       "1",
  pages =        "10:1--10:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2631925",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Fri Mar 27 18:08:08 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The least cognitively demanding way to create a
                 diagram is to draw it with a pen. Yet there is also a
                 need for more formal visualizations, that is, diagrams
                 created using both traditional keyboard and mouse
                 interaction. Our objective is to allow the creation of
                 diagrams using traditional and stylus-based input.
                 Having two diagram creation interfaces requires that
                 changes to a diagram should be automatically rendered
                 in the other visualization. Because sketches are
                 imprecise, there is always the possibility that
                 conversion between visualizations results in a lack of
                 syntactic consistency between the two visualizations.
                 We propose methods for converting diagrams between
                 forms, checking them for equivalence, and rectifying
                 inconsistencies. As a result of our theoretical
                 contributions, we present an intelligent software
                 system allowing users to create and edit diagrams in
                 sketch or formal mode. Our proof-of-concept tool
                 supports diagrams with connected and spatial syntactic
                 elements. Two user studies show that this approach is
                 viable and participants found the software easy to use.
                 We conclude that supporting such diagram creation is
                 now possible in practice.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "10",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Hong:2015:VUR,
  author =       "Richang Hong and Shuicheng Yan and Zhengyou Zhang",
  title =        "Visual Understanding with {RGB-D} Sensors: An
                 Introduction to the Special Issue",
  journal =      j-TIST,
  volume =       "6",
  number =       "2",
  pages =        "11:1--11:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2732265",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Apr 21 11:29:25 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "11",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Chen:2015:KDR,
  author =       "Chongyu Chen and Jianfei Cai and Jianmin Zheng and
                 Tat-Jen Cham and Guangming Shi",
  title =        "{Kinect} Depth Recovery Using a Color-Guided,
                 Region-Adaptive, and Depth-Selective Framework",
  journal =      j-TIST,
  volume =       "6",
  number =       "2",
  pages =        "12:1--12:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700475",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Apr 21 11:29:25 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Considering that the existing depth recovery
                 approaches have different limitations when applied to
                 Kinect depth data, in this article, we propose to
                 integrate their effective features including adaptive
                 support region selection, reliable depth selection, and
                 color guidance together under an optimization framework
                 for Kinect depth recovery. In particular, we formulate
                 our depth recovery as an energy minimization problem,
                 which solves the depth hole filling and denoising
                 simultaneously. The energy function consists of a
                 fidelity term and a regularization term, which are
                 designed according to the Kinect characteristics. Our
                 framework inherits and improves the idea of guided
                 filtering by incorporating structure information and
                 prior knowledge of the Kinect noise model. Through
                 analyzing the solution to the optimization framework,
                 we also derive a local filtering version that provides
                 an efficient and effective way of improving the
                 existing filtering techniques. Quantitative evaluations
                 on our developed synthesized dataset and experiments on
                 real Kinect data show that the proposed method achieves
                 superior performance in terms of recovery accuracy and
                 visual quality.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "12",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Figueroa:2015:CAT,
  author =       "Nadia Figueroa and Haiwei Dong and Abdulmotaleb {El
                 Saddik}",
  title =        "A Combined Approach Toward Consistent Reconstructions
                 of Indoor Spaces Based on {$6$D RGB-D} Odometry and
                 {KinectFusion}",
  journal =      j-TIST,
  volume =       "6",
  number =       "2",
  pages =        "14:1--14:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2629673",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Apr 21 11:29:25 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "We propose a 6D RGB-D odometry approach that finds the
                 relative camera pose between consecutive RGB-D frames
                 by keypoint extraction and feature matching both on the
                 RGB and depth image planes. Furthermore, we feed the
                 estimated pose to the highly accurate KinectFusion
                 algorithm, which uses a fast ICP (Iterative Closest
                 Point) to fine-tune the frame-to-frame relative pose
                 and fuse the depth data into a global implicit surface.
                 We evaluate our method on a publicly available RGB-D
                 SLAM benchmark dataset by Sturm et al. The experimental
                 results show that our proposed reconstruction method
                 solely based on visual odometry and KinectFusion
                 outperforms the state-of-the-art RGB-D SLAM system
                 accuracy. Moreover, our algorithm outputs a
                 ready-to-use polygon mesh (highly suitable for creating
                 3D virtual worlds) without any postprocessing steps.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "14",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zha:2015:RMF,
  author =       "Zheng-Jun Zha and Yang Yang and Jinhui Tang and Meng
                 Wang and Tat-Seng Chua",
  title =        "Robust Multiview Feature Learning for {RGB-D} Image
                 Understanding",
  journal =      j-TIST,
  volume =       "6",
  number =       "2",
  pages =        "15:1--15:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2735521",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Apr 21 11:29:25 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The availability of massive RGB-depth (RGB-D) images
                 poses a compelling need for effective RGB-D content
                 understanding techniques. RGB-D images provide
                 synchronized information from multiple views (e.g.,
                 color and depth) of real-world objects and scenes. This
                 work proposes learning compact and discriminative
                 features from the multiple views of RGB-D content
                 toward effective feature representation for RGB-D image
                 understanding. In particular, a robust multiview
                 feature learning approach is developed, which exploits
                 the intrinsic relations among multiple views. The
                 feature learning in multiple views is jointly optimized
                 in an integrated formulation. The joint optimization
                 essentially exploits the intrinsic relations among the
                 views, leading to effective features and making the
                 learning process robust to noises. The feature learning
                 function is formulated as a robust nonnegative graph
                 embedding function over multiple graphs in various
                 views. The graphs characterize the local geometric and
                 discriminating structure of the multiview data. The
                 joint sparsity in $ l_1$-norm graph embedding and $
                 l_{21}$-norm data factorization further enhances the
                 robustness of feature learning. We derive an efficient
                 computational solution for the proposed approach and
                 provide rigorous theoretical proof with regard to its
                 convergence. We apply the proposed approach to two
                 RGB-D image understanding tasks: RGB-D object
                 classification and RGB-D scene categorization. We
                 conduct extensive experiments on two real-world RGB-D
                 image datasets. The experimental results have
                 demonstrated the effectiveness of the proposed
                 approach.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "15",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2015:RDI,
  author =       "Quanshi Zhang and Xuan Song and Xiaowei Shao and
                 Huijing Zhao and Ryosuke Shibasaki",
  title =        "From {RGB-D} Images to {RGB} Images: Single Labeling
                 for Mining Visual Models",
  journal =      j-TIST,
  volume =       "6",
  number =       "2",
  pages =        "16:1--16:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2629701",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Apr 21 11:29:25 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Mining object-level knowledge, that is, building a
                 comprehensive category model base, from a large set of
                 cluttered scenes presents a considerable challenge to
                 the field of artificial intelligence. How to initiate
                 model learning with the least human supervision (i.e.,
                 manual labeling) and how to encode the structural
                 knowledge are two elements of this challenge, as they
                 largely determine the scalability and applicability of
                 any solution. In this article, we propose a
                 model-learning method that starts from a single-labeled
                 object for each category, and mines further model
                 knowledge from a number of informally captured,
                 cluttered scenes. However, in these scenes, target
                 objects are relatively small and have large variations
                 in texture, scale, and rotation. Thus, to reduce the
                 model bias normally associated with less supervised
                 learning methods, we use the robust 3D shape in RGB-D
                 images to guide our model learning, then apply the
                 properly trained category models to both object
                 detection and recognition in more conventional RGB
                 images. In addition to model training for their own
                 categories, the knowledge extracted from the RGB-D
                 images can also be transferred to guide model learning
                 for a new category, in which only RGB images without
                 depth information in the new category are provided for
                 training. Preliminary testing shows that the proposed
                 method performs as well as fully supervised learning
                 methods.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "16",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Huang:2015:ARM,
  author =       "Meiyu Huang and Yiqiang Chen and Wen Ji and Chunyan
                 Miao",
  title =        "Accurate and Robust Moving-Object Segmentation for
                 Telepresence Systems",
  journal =      j-TIST,
  volume =       "6",
  number =       "2",
  pages =        "17:1--17:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2629480",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Apr 21 11:29:25 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Moving-object segmentation is the key issue of
                 Telepresence systems. With monocular camera--based
                 segmentation methods, desirable segmentation results
                 are hard to obtain in challenging scenes with ambiguous
                 color, illumination changes, and shadows. Approaches
                 based on depth sensors often cause holes inside the
                 object and missegmentations on the object boundary due
                 to inaccurate and unstable estimation of depth data.
                 This work proposes an adaptive multi-cue decision
                 fusion method based on Kinect (which integrates a depth
                 sensor with an RGB camera). First, the algorithm
                 obtains an initial foreground mask based on the depth
                 cue. Second, the algorithm introduces a postprocessing
                 framework to refine the segmentation results, which
                 consists of two main steps: (1) automatically adjusting
                 the weight of two weak decisions to identify foreground
                 holes based on the color and contrast cue separately;
                 and (2) refining the object boundary by integrating the
                 motion probability weighted temporal prior, color
                 likelihood, and smoothness constraint. The extensive
                 experiments we conducted demonstrate that our method
                 can segment moving objects accurately and robustly in
                 various situations in real time.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "17",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhu:2015:FMF,
  author =       "Yu Zhu and Wenbin Chen and Guodong Guo",
  title =        "Fusing Multiple Features for Depth-Based Action
                 Recognition",
  journal =      j-TIST,
  volume =       "6",
  number =       "2",
  pages =        "18:1--18:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2629483",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Apr 21 11:29:25 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Human action recognition is a very active research
                 topic in computer vision and pattern recognition.
                 Recently, it has shown a great potential for human
                 action recognition using the three-dimensional (3D)
                 depth data captured by the emerging RGB-D sensors.
                 Several features and/or algorithms have been proposed
                 for depth-based action recognition. A question is
                 raised: Can we find some complementary features and
                 combine them to improve the accuracy significantly for
                 depth-based action recognition? To address the question
                 and have a better understanding of the problem, we
                 study the fusion of different features for depth-based
                 action recognition. Although data fusion has shown
                 great success in other areas, it has not been well
                 studied yet on 3D action recognition. Some issues need
                 to be addressed, for example, whether the fusion is
                 helpful or not for depth-based action recognition, and
                 how to do the fusion properly. In this article, we
                 study different fusion schemes comprehensively, using
                 diverse features for action characterization in depth
                 videos. Two different levels of fusion schemes are
                 investigated, that is, feature level and decision
                 level. Various methods are explored at each fusion
                 level. Four different features are considered to
                 characterize the depth action patterns from different
                 aspects. The experiments are conducted on four
                 challenging depth action databases, in order to
                 evaluate and find the best fusion methods generally.
                 Our experimental results show that the four different
                 features investigated in the article can complement
                 each other, and appropriate fusion methods can improve
                 the recognition accuracies significantly over each
                 individual feature. More importantly, our fusion-based
                 action recognition outperforms the state-of-the-art
                 approaches on these challenging databases.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "18",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Spurlock:2015:EGD,
  author =       "Scott Spurlock and Richard Souvenir",
  title =        "An Evaluation of Gamesourced Data for Human Pose
                 Estimation",
  journal =      j-TIST,
  volume =       "6",
  number =       "2",
  pages =        "19:1--19:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2629465",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Apr 21 11:29:25 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Gamesourcing has emerged as an approach for rapidly
                 acquiring labeled data for learning-based, computer
                 vision recognition algorithms. In this article, we
                 present an approach for using RGB-D sensors to acquire
                 annotated training data for human pose estimation from
                 2D images. Unlike other gamesourcing approaches, our
                 method does not require a specific game, but runs
                 alongside any gesture-based game using RGB-D sensors.
                 The automatically generated datasets resulting from
                 this approach contain joint estimates within a few
                 pixel units of manually labeled data, and a gamesourced
                 dataset created using a relatively small number of
                 players, games, and locations performs as well as
                 large-scale, manually annotated datasets when used as
                 training data with recent learning-based human pose
                 estimation methods for 2D images.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "19",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Sun:2015:LSV,
  author =       "Chao Sun and Tianzhu Zhang and Changsheng Xu",
  title =        "Latent Support Vector Machine Modeling for Sign
                 Language Recognition with {Kinect}",
  journal =      j-TIST,
  volume =       "6",
  number =       "2",
  pages =        "20:1--20:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2629481",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Apr 21 11:29:25 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Vision-based sign language recognition has attracted
                 more and more interest from researchers in the computer
                 vision field. In this article, we propose a novel
                 algorithm to model and recognize sign language
                 performed in front of a Microsoft Kinect sensor. Under
                 the assumption that some frames are expected to be both
                 discriminative and representative in a sign language
                 video, we first assign a binary latent variable to each
                 frame in training videos for indicating its
                 discriminative capability, then develop a latent
                 support vector machine model to classify the signs, as
                 well as localize the discriminative and representative
                 frames in each video. In addition, we utilize the depth
                 map together with the color image captured by the
                 Kinect sensor to obtain a more effective and accurate
                 feature to enhance the recognition accuracy. To
                 evaluate our approach, we conducted experiments on both
                 word-level sign language and sentence-level sign
                 language. An American Sign Language dataset including
                 approximately 2,000 word-level sign language phrases
                 and 2,000 sentence-level sign language phrases was
                 collected using the Kinect sensor, and each phrase
                 contains color, depth, and skeleton information.
                 Experiments on our dataset demonstrate the
                 effectiveness of the proposed method for sign language
                 recognition.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "20",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Tang:2015:RTH,
  author =       "Ao Tang and Ke Lu and Yufei Wang and Jie Huang and
                 Houqiang Li",
  title =        "A Real-Time Hand Posture Recognition System Using Deep
                 Neural Networks",
  journal =      j-TIST,
  volume =       "6",
  number =       "2",
  pages =        "21:1--21:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2735952",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Apr 21 11:29:25 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Hand posture recognition (HPR) is quite a challenging
                 task, due to both the difficulty in detecting and
                 tracking hands with normal cameras and the limitations
                 of traditional manually selected features. In this
                 article, we propose a two-stage HPR system for Sign
                 Language Recognition using a Kinect sensor. In the
                 first stage, we propose an effective algorithm to
                 implement hand detection and tracking. The algorithm
                 incorporates both color and depth information, without
                 specific requirements on uniform-colored or stable
                 background. It can handle the situations in which hands
                 are very close to other parts of the body or hands are
                 not the nearest objects to the camera and allows for
                 occlusion of hands caused by faces or other hands. In
                 the second stage, we apply deep neural networks (DNNs)
                 to automatically learn features from hand posture
                 images that are insensitive to movement, scaling, and
                 rotation. Experiments verify that the proposed system
                 works quickly and accurately and achieves a recognition
                 accuracy as high as 98.12\%.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "21",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2015:RTS,
  author =       "Liyan Zhang and Fan Liu and Jinhui Tang",
  title =        "Real-Time System for Driver Fatigue Detection by
                 {RGB-D} Camera",
  journal =      j-TIST,
  volume =       "6",
  number =       "2",
  pages =        "22:1--22:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2629482",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Apr 21 11:29:25 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Drowsy driving is one of the major causes of fatal
                 traffic accidents. In this article, we propose a
                 real-time system that utilizes RGB-D cameras to
                 automatically detect driver fatigue and generate alerts
                 to drivers. By introducing RGB-D cameras, the depth
                 data can be obtained, which provides extra evidence to
                 benefit the task of head detection and head pose
                 estimation. In this system, two important visual cues
                 (head pose and eye state) for driver fatigue detection
                 are extracted and leveraged simultaneously. We first
                 present a real-time 3D head pose estimation method by
                 leveraging RGB and depth data. Then we introduce a
                 novel method to predict eye states employing the WLBP
                 feature, which is a powerful local image descriptor
                 that is robust to noise and illumination variations.
                 Finally, we integrate the results from both head pose
                 and eye states to generate the overall conclusion. The
                 combination and collaboration of the two types of
                 visual cues can reduce the uncertainties and resolve
                 the ambiguity that a single cue may induce. The
                 experiments were performed using an inside-car
                 environment during the day and night, and they fully
                 demonstrate the effectiveness and robustness of our
                 system as well as the proposed methods of predicting
                 head pose and eye states.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "22",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Kyan:2015:ABD,
  author =       "Matthew Kyan and Guoyu Sun and Haiyan Li and Ling
                 Zhong and Paisarn Muneesawang and Nan Dong and Bruce
                 Elder and Ling Guan",
  title =        "An Approach to Ballet Dance Training through {MS
                 Kinect} and Visualization in a {CAVE} Virtual Reality
                 Environment",
  journal =      j-TIST,
  volume =       "6",
  number =       "2",
  pages =        "23:1--23:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2735951",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Apr 21 11:29:25 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "This article proposes a novel framework for the
                 real-time capture, assessment, and visualization of
                 ballet dance movements as performed by a student in an
                 instructional, virtual reality (VR) setting. The
                 acquisition of human movement data is facilitated by
                 skeletal joint tracking captured using the popular
                 Microsoft (MS) Kinect camera system, while instruction
                 and performance evaluation are provided in the form of
                 3D visualizations and feedback through a CAVE virtual
                 environment, in which the student is fully immersed.
                 The proposed framework is based on the unsupervised
                 parsing of ballet dance movement into a structured
                 posture space using the spherical self-organizing map
                 (SSOM). A unique feature descriptor is proposed to more
                 appropriately reflect the subtleties of ballet dance
                 movements, which are represented as gesture
                 trajectories through posture space on the SSOM. This
                 recognition subsystem is used to identify the category
                 of movement the student is attempting when prompted (by
                 a virtual instructor) to perform a particular dance
                 sequence. The dance sequence is then segmented and
                 cross-referenced against a library of gestural
                 components performed by the teacher. This facilitates
                 alignment and score-based assessment of individual
                 movements within the context of the dance sequence. An
                 immersive interface enables the student to review his
                 or her performance from a number of vantage points,
                 each providing a unique perspective and spatial context
                 suggestive of how the student might make improvements
                 in training. An evaluation of the recognition and
                 virtual feedback systems is presented.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "23",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Shi:2015:ESC,
  author =       "Miaojing Shi and Xinghai Sun and Dacheng Tao and Chao
                 Xu and George Baciu and Hong Liu",
  title =        "Exploring Spatial Correlation for Visual Object
                 Retrieval",
  journal =      j-TIST,
  volume =       "6",
  number =       "2",
  pages =        "24:1--24:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2641576",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Apr 21 11:29:25 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Bag-of-visual-words (BOVW)-based image representation
                 has received intense attention in recent years and has
                 improved content-based image retrieval (CBIR)
                 significantly. BOVW does not consider the spatial
                 correlation between visual words in natural images and
                 thus biases the generated visual words toward noise
                 when the corresponding visual features are not stable.
                 This article outlines the construction of a visual word
                 co-occurrence matrix by exploring visual word
                 co-occurrence extracted from small affine-invariant
                 regions in a large collection of natural images. Based
                 on this co-occurrence matrix, we first present a novel
                 high-order predictor to accelerate the generation of
                 spatially correlated visual words and a penalty tree
                 (PTree) to continue generating the words after the
                 prediction. Subsequently, we propose two methods of
                 co-occurrence weighting similarity measure for image
                 ranking: Co-Cosine and Co-TFIDF. These two new schemes
                 down-weight the contributions of the words that are
                 less discriminative because of frequent co-occurrences
                 with other words. We conduct experiments on Oxford and
                 Paris Building datasets, in which the ImageNet dataset
                 is used to implement a large-scale evaluation.
                 Cross-dataset evaluations between the Oxford and Paris
                 datasets and Oxford and Holidays datasets are also
                 provided. Thorough experimental results suggest that
                 our method outperforms the state of the art without
                 adding much additional cost to the BOVW model.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "24",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Doherty:2015:PMT,
  author =       "Jonathan Doherty and Kevin Curran and Paul McKevitt",
  title =        "Pattern Matching Techniques for Replacing Missing
                 Sections of Audio Streamed across Wireless Networks",
  journal =      j-TIST,
  volume =       "6",
  number =       "2",
  pages =        "25:1--25:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2663358",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Apr 21 11:29:25 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Streaming media on the Internet can be unreliable.
                 Services such as audio-on-demand drastically increase
                 the loads on networks; therefore, new, robust, and
                 highly efficient coding algorithms are necessary. One
                 method overlooked to date, which can work alongside
                 existing audio compression schemes, is that which takes
                 into account the semantics and natural repetition of
                 music. Similarity detection within polyphonic audio has
                 presented problematic challenges within the field of
                 music information retrieval. One approach to deal with
                 bursty errors is to use self-similarity to replace
                 missing segments. Many existing systems exist based on
                 packet loss and replacement on a network level, but
                 none attempt repairs of large dropouts of 5 seconds or
                 more. Music exhibits standard structures that can be
                 used as a forward error correction (FEC) mechanism. FEC
                 is an area that addresses the issue of packet loss with
                 the onus of repair placed as much as possible on the
                 listener's device. We have developed a
                 server--client-based framework (SoFI) for automatic
                 detection and replacement of large packet losses on
                 wireless networks when receiving time-dependent
                 streamed audio. Whenever dropouts occur, SoFI swaps
                 audio presented to the listener between a live stream
                 and previous sections of the audio stored locally.
                 Objective and subjective evaluations of SoFI where
                 subjects were presented with other simulated approaches
                 to audio repair together with simulations of
                 replacements including varying lengths of time in the
                 repair give positive results.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "25",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Hai:2015:ABU,
  author =       "Zhen Hai and Kuiyu Chang and Gao Cong and Christopher
                 C. Yang",
  title =        "An Association-Based Unified Framework for Mining
                 Features and Opinion Words",
  journal =      j-TIST,
  volume =       "6",
  number =       "2",
  pages =        "26:1--26:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2663359",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Apr 21 11:29:25 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Mining features and opinion words is essential for
                 fine-grained opinion analysis of customer reviews. It
                 is observed that semantic dependencies naturally exist
                 between features and opinion words, even among features
                 or opinion words themselves. In this article, we employ
                 a corpus statistics association measure to quantify the
                 pairwise word dependencies and propose a generalized
                 association-based unified framework to identify
                 features, including explicit and implicit features, and
                 opinion words from reviews. We first extract explicit
                 features and opinion words via an association-based
                 bootstrapping method (ABOOT). ABOOT starts with a small
                 list of annotated feature seeds and then iteratively
                 recognizes a large number of domain-specific features
                 and opinion words by discovering the corpus statistics
                 association between each pair of words on a given
                 review domain. Two instances of this ABOOT method are
                 evaluated based on two particular association models,
                 likelihood ratio tests (LRTs) and latent semantic
                 analysis (LSA). Next, we introduce a natural extension
                 to identify implicit features by employing the
                 recognized known semantic correlations between features
                 and opinion words. Experimental results illustrate the
                 benefits of the proposed association-based methods for
                 identifying features and opinion words versus benchmark
                 methods.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "26",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Huang:2015:HMC,
  author =       "Shanshan Huang and Jun Ma and Peizhe Cheng and
                 Shuaiqiang Wang",
  title =        "A {Hybrid Multigroup CoClustering} Recommendation
                 Framework Based on Information Fusion",
  journal =      j-TIST,
  volume =       "6",
  number =       "2",
  pages =        "27:1--27:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700465",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Apr 21 11:29:25 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Collaborative Filtering (CF) is one of the most
                 successful algorithms in recommender systems. However,
                 it suffers from data sparsity and scalability problems.
                 Although many clustering techniques have been
                 incorporated to alleviate these two problems, most of
                 them fail to achieve further significant improvement in
                 recommendation accuracy. First of all, most of them
                 assume each user or item belongs to a single cluster.
                 Since usually users can hold multiple interests and
                 items may belong to multiple categories, it is more
                 reasonable to assume that users and items can join
                 multiple clusters (groups), where each cluster is a
                 subset of like-minded users and items they prefer.
                 Furthermore, most of the clustering-based CF models
                 only utilize historical rating information in the
                 clustering procedure but ignore other data resources in
                 recommender systems such as the social connections of
                 users and the correlations between items. In this
                 article, we propose HMCoC, a Hybrid Multigroup
                 CoClustering recommendation framework, which can
                 cluster users and items into multiple groups
                 simultaneously with different information resources. In
                 our framework, we first integrate information of
                 user--item rating records, user social networks, and
                 item features extracted from the DBpedia knowledge
                 base. We then use an optimization method to mine
                 meaningful user--item groups with all the information.
                 Finally, we apply the conventional CF method in each
                 cluster to make predictions. By merging the predictions
                 from each cluster, we generate the top-n
                 recommendations to the target users for return.
                 Extensive experimental results demonstrate the superior
                 performance of our approach in top-n recommendation in
                 terms of MAP, NDCG, and F1 compared with other
                 clustering-based CF models.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "27",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Fire:2015:DMO,
  author =       "Michael Fire and Yuval Elovici",
  title =        "Data Mining of Online Genealogy Datasets for Revealing
                 Lifespan Patterns in Human Population",
  journal =      j-TIST,
  volume =       "6",
  number =       "2",
  pages =        "28:1--28:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700464",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Tue Apr 21 11:29:25 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Online genealogy datasets contain extensive
                 information about millions of people and their past and
                 present family connections. This vast amount of data
                 can help identify various patterns in the human
                 population. In this study, we present methods and
                 algorithms that can assist in identifying variations in
                 lifespan distributions of the human population in the
                 past centuries, in detecting social and genetic
                 features that correlate with the human lifespan, and in
                 constructing predictive models of human lifespan based
                 on various features that can easily be extracted from
                 genealogy datasets. We have evaluated the presented
                 methods and algorithms on a large online genealogy
                 dataset with over a million profiles and over 9 million
                 connections, all of which were collected from the
                 WikiTree website. Our findings indicate that
                 significant but small positive correlations exist
                 between the parents' lifespan and their children's
                 lifespan. Additionally, we found slightly higher and
                 significant correlations between the lifespans of
                 spouses. We also discovered a very small positive and
                 significant correlation between longevity and
                 reproductive success in males, and a small and
                 significant negative correlation between longevity and
                 reproductive success in females. Moreover, our
                 predictive models presented results with a Mean
                 Absolute Error as low as 13.18 in predicting the
                 lifespans of individuals who outlived the age of 10,
                 and our classification models presented better than
                 random classification results in predicting which
                 people who outlive the age of 50 will also outlive the
                 age of 80. We believe that this study will be the first
                 of many studies to utilize the wealth of data on human
                 populations, existing in online genealogy datasets, to
                 better understand factors that influence the human
                 lifespan. Understanding these factors can assist
                 scientists in providing solutions for successful
                 aging.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "28",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zheng:2015:TDM,
  author =       "Yu Zheng",
  title =        "Trajectory Data Mining: An Overview",
  journal =      j-TIST,
  volume =       "6",
  number =       "3",
  pages =        "29:1--29:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2743025",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu May 21 15:49:31 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The advances in location-acquisition and mobile
                 computing techniques have generated massive spatial
                 trajectory data, which represent the mobility of a
                 diversity of moving objects, such as people, vehicles,
                 and animals. Many techniques have been proposed for
                 processing, managing, and mining trajectory data in the
                 past decade, fostering a broad range of applications.
                 In this article, we conduct a systematic survey on the
                 major research into trajectory data mining, providing a
                 panorama of the field as well as the scope of its
                 research topics. Following a road map from the
                 derivation of trajectory data, to trajectory data
                 preprocessing, to trajectory data management, and to a
                 variety of mining tasks (such as trajectory pattern
                 mining, outlier detection, and trajectory
                 classification), the survey explores the connections,
                 correlations, and differences among these existing
                 techniques. This survey also introduces the methods
                 that transform trajectories into other data formats,
                 such as graphs, matrices, and tensors, to which more
                 data mining and machine learning techniques can be
                 applied. Finally, some public trajectory datasets are
                 presented. This survey can help shape the field of
                 trajectory data mining, providing a quick understanding
                 of this field to the community.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "29",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Bouguessa:2015:IAO,
  author =       "Mohamed Bouguessa and Lotfi Ben Romdhane",
  title =        "Identifying Authorities in Online Communities",
  journal =      j-TIST,
  volume =       "6",
  number =       "3",
  pages =        "30:1--30:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700481",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu May 21 15:49:31 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Several approaches have been proposed for the problem
                 of identifying authoritative actors in online
                 communities. However, the majority of existing methods
                 suffer from one or more of the following limitations:
                 (1) There is a lack of an automatic mechanism to
                 formally discriminate between authoritative and
                 nonauthoritative users. In fact, a common approach to
                 authoritative user identification is to provide a
                 ranked list of users expecting authorities to come
                 first. A major problem of such an approach is the
                 question of where to stop reading the ranked list of
                 users. How many users should be chosen as
                 authoritative? (2) Supervised learning approaches for
                 authoritative user identification suffer from their
                 dependency on the training data. The problem here is
                 that labeled samples are more difficult, expensive, and
                 time consuming to obtain than unlabeled ones. (3)
                 Several approaches rely on some user parameters to
                 estimate an authority score. Detection accuracy of
                 authoritative users can be seriously affected if
                 incorrect values are used. In this article, we propose
                 a parameterless mixture model-based approach that is
                 capable of addressing the three aforementioned issues
                 in a single framework. In our approach, we first
                 represent each user with a feature vector composed of
                 information related to its social behavior and activity
                 in an online community. Next, we propose a statistical
                 framework, based on the multivariate beta mixtures, in
                 order to model the estimated set of feature vectors.
                 The probability density function is therefore estimated
                 and the beta component that corresponds to the most
                 authoritative users is identified. The suitability of
                 the proposed approach is illustrated on real data
                 extracted from the Stack Exchange question-answering
                 network and Twitter.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "30",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Lee:2015:WWR,
  author =       "Kyumin Lee and Jalal Mahmud and Jilin Chen and
                 Michelle Zhou and Jeffrey Nichols",
  title =        "Who Will Retweet This? {Detecting} Strangers from
                 {Twitter} to Retweet Information",
  journal =      j-TIST,
  volume =       "6",
  number =       "3",
  pages =        "31:1--31:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700466",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu May 21 15:49:31 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "There has been much effort on studying how social
                 media sites, such as Twitter, help propagate
                 information in different situations, including
                 spreading alerts and SOS messages in an emergency.
                 However, existing work has not addressed how to
                 actively identify and engage the right strangers at the
                 right time on social media to help effectively
                 propagate intended information within a desired time
                 frame. To address this problem, we have developed three
                 models: (1) a feature-based model that leverages
                 people's exhibited social behavior, including the
                 content of their tweets and social interactions, to
                 characterize their willingness and readiness to
                 propagate information on Twitter via the act of
                 retweeting; (2) a wait-time model based on a user's
                 previous retweeting wait times to predict his or her
                 next retweeting time when asked; and (3) a subset
                 selection model that automatically selects a subset of
                 people from a set of available people using
                 probabilities predicted by the feature-based model and
                 maximizes retweeting rate. Based on these three models,
                 we build a recommender system that predicts the
                 likelihood of a stranger to retweet information when
                 asked, within a specific time window, and recommends
                 the top-N qualified strangers to engage with. Our
                 experiments, including live studies in the real world,
                 demonstrate the effectiveness of our work.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "31",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Hirschprung:2015:SDD,
  author =       "Ron Hirschprung and Eran Toch and Oded Maimon",
  title =        "Simplifying Data Disclosure Configurations in a Cloud
                 Computing Environment",
  journal =      j-TIST,
  volume =       "6",
  number =       "3",
  pages =        "32:1--32:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700472",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu May 21 15:49:31 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Cloud computing offers a compelling vision of
                 computation, enabling an unprecedented level of data
                 distribution and sharing. Beyond improving the
                 computing infrastructure, cloud computing enables a
                 higher level of interoperability between information
                 systems, simplifying tasks such as sharing documents
                 between coworkers or enabling collaboration between an
                 organization and its suppliers. While these abilities
                 may result in significant benefits to users and
                 organizations, they also present privacy challenges due
                 to unwanted exposure of sensitive information. As
                 information-sharing processes in cloud computing are
                 complex and domain specific, configuring these
                 processes can be an overwhelming and burdensome task
                 for users. This article investigates the feasibility of
                 configuring sharing processes through a small and
                 representative set of canonical configuration options.
                 For this purpose, we present a generic method, named
                 SCON-UP (Simplified CON-figuration of User
                 Preferences). SCON-UP simplifies configuration
                 interfaces by using a clustering algorithm that
                 analyzes a massive set of sharing preferences and
                 condenses them into a small number of discrete
                 disclosure levels. Thus, the user is provided with a
                 usable configuration model while guaranteeing adequate
                 privacy control. We describe the algorithm and
                 empirically evaluate our model using data collected in
                 two user studies (n = 121 and n = 352). Our results
                 show that when provided with three canonical
                 configuration options, on average, 82\% of the
                 population can be covered by at least one option. We
                 exemplify the feasibility of discretizing sharing
                 levels and discuss the tradeoff between coverage and
                 simplicity in discrete configuration options.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "32",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Elbadrawy:2015:USF,
  author =       "Asmaa Elbadrawy and George Karypis",
  title =        "User-Specific Feature-Based Similarity Models for
                  Top-$n$ Recommendation of New Items",
  journal =      j-TIST,
  volume =       "6",
  number =       "3",
  pages =        "33:1--33:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700495",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu May 21 15:49:31 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                  https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Recommending new items for suitable users is an
                  important yet challenging problem due to the lack of
                  preference history for the new items. Noncollaborative
                  user modeling techniques that rely on the item features
                  can be used to recommend new items. However, they only
                  use the past preferences of each user to provide
                  recommendations for that user. They do not utilize
                  information from the past preferences of other users,
                  which can potentially be ignoring useful information.
                  More recent factor models transfer knowledge across
                  users using their preference information in order to
                  provide more accurate recommendations. These methods
                  learn a low-rank approximation for the preference
                  matrix, which can lead to loss of information.
                  Moreover, they might not be able to learn useful
                  patterns given very sparse datasets. In this work, we
                  present {\sc UFSM}, a method for top-$n$
                  recommendation of new items given binary user
                  preferences. {\sc UFSM} learns {\bf U}ser-specific
                  {\bf F}eature-based item-{\bf S}imilarity {\bf
                  M}odels, and its strength lies in combining two points:
                  (1) exploiting preference information across all users
                  to learn multiple global item similarity functions and
                  (2) learning user-specific weights that determine the
                  contribution of each global similarity function in
                  generating recommendations for each user. {\sc UFSM}
                  can be considered as a sparse high-dimensional factor
                  model where the previous preferences of each user are
                  incorporated within his or her latent representation.
                  This way, {\sc UFSM} combines the merits of item
                  similarity models that capture local relations among
                  items and factor models that learn global preference
                  patterns. A comprehensive set of experiments was
                  conducted to compare {\sc UFSM} against
                  state-of-the-art collaborative factor models and
                  noncollaborative user modeling techniques. Results
                  show that {\sc UFSM} outperforms other techniques in
                  terms of recommendation quality. {\sc UFSM} manages to
                  yield better recommendations even with very sparse
                  datasets. Results also show that {\sc UFSM} can
                  efficiently handle high-dimensional as well as
                  low-dimensional item feature spaces.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "33",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                  (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2015:TGO,
  author =       "Mingjin Zhang and Huibo Wang and Yun Lu and Tao Li and
                 Yudong Guang and Chang Liu and Erik Edrosa and Hongtai
                 Li and Naphtali Rishe",
  title =        "{TerraFly GeoCloud}: an Online Spatial Data Analysis
                 and Visualization System",
  journal =      j-TIST,
  volume =       "6",
  number =       "3",
  pages =        "34:1--34:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700494",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu May 21 15:49:31 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "With the exponential growth of the usage of web map
                 services, geo-data analysis has become more and more
                 popular. This article develops an online spatial data
                 analysis and visualization system, TerraFly GeoCloud,
                 which helps end-users visualize and analyze spatial
                 data and share the analysis results. Built on the
                 TerraFly Geo spatial database, TerraFly GeoCloud is an
                 extra layer running upon the TerraFly map and can
                 efficiently support many different visualization
                 functions and spatial data analysis models.
                 Furthermore, users can create unique URLs to visualize
                 and share the analysis results. TerraFly GeoCloud also
                 enables the MapQL technology to customize map
                 visualization using SQL-like statements. The system is
                 available at http://terrafly.fiu.edu/GeoCloud/.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "34",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Chen:2015:SCP,
  author =       "Yi-Cheng Chen and Wen-Chih Peng and Jiun-Long Huang
                 and Wang-Chien Lee",
  title =        "Significant Correlation Pattern Mining in Smart
                 Homes",
  journal =      j-TIST,
  volume =       "6",
  number =       "3",
  pages =        "35:1--35:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700484",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu May 21 15:49:31 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Owing to the great advent of sensor technology, the
                 usage data of appliances in a house can be logged and
                 collected easily today. However, it is a challenge for
                 the residents to visualize how these appliances are
                 used. Thus, mining algorithms are much needed to
                 discover appliance usage patterns. Most previous
                 studies on usage pattern discovery are mainly focused
                 on analyzing the patterns of single appliance rather
                 than mining the usage correlation among appliances. In
                 this article, a novel algorithm, namely Correlation
                 Pattern Miner (CoPMiner), is developed to capture the
                 usage patterns and correlations among appliances
                 probabilistically. CoPMiner also employs four pruning
                 techniques and a statistical model to reduce the search
                 space and filter out insignificant patterns,
                 respectively. Furthermore, the proposed algorithm is
                 applied on a real-world dataset to show the
                 practicability of correlation pattern mining.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "35",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Guo:2015:ISI,
  author =       "Bin Guo and Alvin Chin and Zhiwen Yu and Runhe Huang
                 and Daqing Zhang",
  title =        "An Introduction to the Special Issue on Participatory
                 Sensing and Crowd Intelligence",
  journal =      j-TIST,
  volume =       "6",
  number =       "3",
  pages =        "36:1--36:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2745712",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu May 21 15:49:31 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "36",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2015:SPU,
  author =       "Fuzheng Zhang and Nicholas Jing Yuan and David Wilkie
                 and Yu Zheng and Xing Xie",
  title =        "Sensing the Pulse of Urban Refueling Behavior: a
                 Perspective from Taxi Mobility",
  journal =      j-TIST,
  volume =       "6",
  number =       "3",
  pages =        "37:1--37:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2644828",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu May 21 15:49:31 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Urban transportation is an important factor in energy
                 consumption and pollution, and is of increasing concern
                 due to its complexity and economic significance. Its
                 importance will only increase as urbanization continues
                 around the world. In this article, we explore drivers'
                 refueling behavior in urban areas. Compared to
                 questionnaire-based methods of the past, we propose a
                 complete data-driven system that pushes towards
                 real-time sensing of individual refueling behavior and
                 citywide petrol consumption. Our system provides the
                 following: detection of individual refueling events
                 (REs) from which refueling preference can be analyzed;
                 estimates of gas station wait times from which
                 recommendations can be made; an indication of overall
                 fuel demand from which macroscale economic decisions
                 can be made, and a spatial, temporal, and economic view
                 of urban refueling characteristics. For individual
                 behavior, we use reported trajectories from a fleet of
                 GPS-equipped taxicabs to detect gas station visits. For
                 time spent estimates, to solve the sparsity issue along
                 time and stations, we propose context-aware tensor
                 factorization (CATF), a factorization model that
                 considers a variety of contextual factors (e.g., price,
                 brand, and weather condition) that affect consumers'
                 refueling decision. For fuel demand estimates, we apply
                 a queue model to calculate the overall visits based on
                 the time spent inside the station. We evaluated our
                 system on large-scale and real-world datasets, which
                 contain 4-month trajectories of 32,476 taxicabs, 689
                 gas stations, and the self-reported refueling details
                 of 8,326 online users. The results show that our system
                 can determine REs with an accuracy of more than 90\%,
                 estimate time spent with less than 2 minutes of error,
                 and measure overall visits in the same order of
                 magnitude with the records in the field study.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "37",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Tangmunarunkit:2015:OGE,
  author =       "H. Tangmunarunkit and C. K. Hsieh and B. Longstaff and
                 S. Nolen and J. Jenkins and C. Ketcham and J. Selsky
                 and F. Alquaddoomi and D. George and J. Kang and Z.
                 Khalapyan and J. Ooms and N. Ramanathan and D. Estrin",
  title =        "{Ohmage}: a General and Extensible End-to-End
                 Participatory Sensing Platform",
  journal =      j-TIST,
  volume =       "6",
  number =       "3",
  pages =        "38:1--38:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2717318",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu May 21 15:49:31 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Participatory sensing (PS) is a distributed data
                 collection and analysis approach where individuals,
                 acting alone or in groups, use their personal mobile
                 devices to systematically explore interesting aspects
                 of their lives and communities [Burke et al. 2006].
                 These mobile devices can be used to capture diverse
                 spatiotemporal data through both intermittent
                 self-report and continuous recording from on-board
                 sensors and applications. Ohmage (http://ohmage.org) is
                 a modular and extensible open-source, mobile to Web PS
                 platform that records, stores, analyzes, and visualizes
                 data from both prompted self-report and continuous data
                 streams. These data streams are authorable and can
                 dynamically be deployed in diverse settings. Feedback
                 from hundreds of behavioral and technology researchers,
                 focus group participants, and end users has been
                 integrated into ohmage through an iterative
                 participatory design process. Ohmage has been used as
                 an enabling platform in more than 20 independent
                 projects in many disciplines. We summarize the PS
                 requirements, challenges and key design objectives
                 learned through our design process, and ohmage system
                 architecture to achieve those objectives. The
                 flexibility, modularity, and extensibility of ohmage in
                 supporting diverse deployment settings are presented
                 through three distinct case studies in education,
                 health, and clinical research.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "38",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Xiong:2015:EEE,
  author =       "Haoyi Xiong and Daqing Zhang and Leye Wang and J. Paul
                 Gibson and Jie Zhu",
  title =        "{EEMC}: Enabling Energy-Efficient Mobile Crowdsensing
                 with Anonymous Participants",
  journal =      j-TIST,
  volume =       "6",
  number =       "3",
  pages =        "39:1--39:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2644827",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu May 21 15:49:31 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Mobile Crowdsensing (MCS) requires users to be
                 motivated to participate. However, concerns regarding
                  energy consumption and privacy---among other things---may
                 compromise their willingness to join such a crowd. Our
                 preliminary observations and analysis of common MCS
                 applications have shown that the data transfer in MCS
                 applications may incur significant energy consumption
                 due to the 3G connection setup. However, if data are
                 transferred in parallel with a traditional phone call,
                 then such transfer can be done almost ``for free'':
                 with only an insignificant additional amount of energy
                  required to piggy-back the data---usually incoming task
                  assignments and outgoing sensor results---on top of the
                 call. Here, we present an {\em Energy-Efficient Mobile
                 Crowdsensing\/} (EEMC) framework where task assignments
                 and sensing results are transferred in parallel with
                 phone calls. The main objective, and the principal
                 contribution of this article, is an MCS task assignment
                 scheme that guarantees that a minimum number of
                 anonymous participants return sensor results within a
                 specified time frame, while also minimizing the waste
                 of energy due to redundant task assignments and
                 considering privacy concerns of participants.
                 Evaluations with a large-scale real-world phone call
                 dataset show that our proposed {EEMC} framework
                 outperforms the baseline approaches, and it can reduce
                 overall energy consumption in data transfer by 54--66\%
                 when compared to the 3G-based solution.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "39",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2015:CSS,
  author =       "Wangsheng Zhang and Guande Qi and Gang Pan and Hua Lu
                 and Shijian Li and Zhaohui Wu",
  title =        "City-Scale Social Event Detection and Evaluation with
                 Taxi Traces",
  journal =      j-TIST,
  volume =       "6",
  number =       "3",
  pages =        "40:1--40:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700478",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu May 21 15:49:31 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "A social event is an occurrence that involves lots of
                 people and is accompanied by an obvious rise in human
                 flow. Analysis of social events has real-world
                 importance because events bring about impacts on many
                 aspects of city life. Traditionally, detection and
                 impact measurement of social events rely on social
                 investigation, which involves considerable human
                 effort. Recently, by analyzing messages in social
                 networks, researchers can also detect and evaluate
                 country-scale events. Nevertheless, the analysis of
                 city-scale events has not been explored. In this
                 article, we use human flow dynamics, which reflect the
                 social activeness of a region, to detect social events
                 and measure their impacts. We first extract human flow
                 dynamics from taxi traces. Second, we propose a method
                 that can not only discover the happening time and venue
                 of events from abnormal social activeness, but also
                 measure the scale of events through changes in such
                 activeness. Third, we extract traffic congestion
                 information from traces and use its change during
                 social events to measure their impact. The results of
                 experiments validate the effectiveness of both the
                 event detection and impact measurement methods.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "40",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Sang:2015:ASC,
  author =       "Jitao Sang and Tao Mei and Changsheng Xu",
  title =        "Activity Sensor: Check-In Usage Mining for Local
                 Recommendation",
  journal =      j-TIST,
  volume =       "6",
  number =       "3",
  pages =        "41:1--41:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700468",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu May 21 15:49:31 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "While on the go, people are using their phones as a
                 personal concierge discovering what is around and
                 deciding what to do. Mobile phone has become a
                 recommendation terminal customized for
                  individuals---capable of recommending activities and
                 simplifying the accomplishment of related tasks. In
                 this article, we conduct usage mining on the check-in
                 data, with summarized statistics identifying the local
                 recommendation challenges of huge solution space,
                 sparse available data, and complicated user intent, and
                 discovered observations to motivate the hierarchical,
                 contextual, and sequential solution. We present a
                 point-of-interest (POI) category-transition--based
                 approach, with a goal of estimating the visiting
                 probability of a series of successive POIs conditioned
                 on current user context and sensor context. A mobile
                 local recommendation demo application is deployed. The
                 objective and subjective evaluations validate the
                 effectiveness in providing mobile users both accurate
                 recommendation and favorable user experience.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "41",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2015:EDQ,
  author =       "Bo Zhang and Zheng Song and Chi Harold Liu and Jian Ma
                 and Wendong Wang",
  title =        "An Event-Driven {QoI}-Aware Participatory Sensing
                 Framework with Energy and Budget Constraints",
  journal =      j-TIST,
  volume =       "6",
  number =       "3",
  pages =        "42:1--42:??",
  month =        may,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2630074",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu May 21 15:49:31 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Participatory sensing systems can be used for
                 concurrent event monitoring applications, like noise
                 levels, fire, and pollutant concentrations. However,
                 they are facing new challenges as to how to accurately
                 detect the exact boundaries of these events, and
                 further, to select the most appropriate participants to
                 collect the sensing data. On the one hand,
                 participants' handheld smart devices are constrained
                 with different energy conditions and sensing
                 capabilities, and they move around with uncontrollable
                 mobility patterns in their daily life. On the other
                 hand, these sensing tasks are within time-varying
                 quality-of-information (QoI) requirements and budget to
                 afford the users' incentive expectations. Toward this
                 end, this article proposes an event-driven QoI-aware
                 participatory sensing framework with energy and budget
                 constraints. The main method of this framework is event
                 boundary detection. For the former, a two-step
                 heuristic solution is proposed where the coarse-grained
                 detection step finds its approximation and the
                 fine-grained detection step identifies the exact
                 location. Participants are selected by explicitly
                 considering their mobility pattern, required QoI of
                 multiple tasks, and users' incentive requirements,
                 under the constraint of an aggregated task budget.
                 Extensive experimental results, based on a real trace
                 in Beijing, show the effectiveness and robustness of
                 our approach, while comparing with existing schemes.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "42",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Anantharam:2015:ECT,
  author =       "Pramod Anantharam and Payam Barnaghi and Krishnaprasad
                 Thirunarayan and Amit Sheth",
  title =        "Extracting City Traffic Events from Social Streams",
  journal =      j-TIST,
  volume =       "6",
  number =       "4",
  pages =        "43:1--43:??",
  month =        aug,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2717317",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Aug 13 17:37:43 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Cities are composed of complex systems with physical,
                 cyber, and social components. Current works on
                 extracting and understanding city events mainly rely on
                 technology-enabled infrastructure to observe and record
                 events. In this work, we propose an approach to
                 leverage citizen observations of various city systems
                 and services, such as traffic, public transport, water
                 supply, weather, sewage, and public safety, as a source
                 of city events. We investigate the feasibility of using
                 such textual streams for extracting city events from
                 annotated text. We formalize the problem of annotating
                 social streams such as microblogs as a sequence
                 labeling problem. We present a novel training data
                 creation process for training sequence labeling models.
                 Our automatic training data creation process utilizes
                 instance-level domain knowledge (e.g., locations in a
                 city, possible event terms). We compare this automated
                 annotation process to a state-of-the-art tool that
                 needs manually created training data and show that it
                 has comparable performance in annotation tasks. An
                 aggregation algorithm is then presented for event
                 extraction from annotated text. We carry out a
                 comprehensive evaluation of the event annotation and
                 event extraction on a real-world dataset consisting of
                 event reports and tweets collected over 4 months from
                 the San Francisco Bay Area. The evaluation results are
                 promising and provide insights into the utility of
                 social stream for extracting city events.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "43",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Sawant:2015:AGC,
  author =       "Anshul Sawant and John P. Dickerson and Mohammad T.
                 Hajiaghayi and V. S. Subrahmanian",
  title =        "Automated Generation of Counterterrorism Policies
                 Using Multiexpert Input",
  journal =      j-TIST,
  volume =       "6",
  number =       "4",
  pages =        "44:1--44:??",
  month =        aug,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2716328",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Aug 13 17:37:43 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The use of game theory to model conflict has been
                 studied by several researchers, spearheaded by
                 Schelling. Most of these efforts assume a single payoff
                 matrix that captures players' utilities under different
                 assumptions about what the players will do. Our
                 experience in counterterrorism applications is that
                 experts disagree on these payoffs. We leverage
                 Shapley's notion of vector equilibria, which formulates
                 games where there are multiple payoff matrices, but
                 note that they are very hard to compute in practice. To
                 effectively enumerate large numbers of equilibria with
                 payoffs provided by multiple experts, we propose a
                 novel combination of vector payoffs and well-supported
                 $ \epsilon $-approximate equilibria. We develop bounds
                 related to computation of these equilibria for some
                 special cases and give a quasipolynomial time
                 approximation scheme (QPTAS) for the general case when
                 the number of players is small (which is true in many
                 real-world applications). Leveraging this QPTAS, we
                 give efficient algorithms to find such equilibria and
                 experimental results showing that they work well on
                 simulated data. We then built a policy recommendation
                 engine based on vector equilibria, called PREVE. We use
                 PREVE to model the terrorist group Lashkar-e-Taiba
                 (LeT), responsible for the 2008 Mumbai attacks, as a
                 five-player game. Specifically, we apply it to three
                 payoff matrices provided by experts in India--Pakistan
                 relations, analyze the equilibria generated by PREVE,
                 and suggest counterterrorism policies that may reduce
                 attacks by LeT. We briefly discuss these results and
                 identify their strengths and weaknesses from a policy
                 point of view.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "44",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Bai:2015:OPL,
  author =       "Aijun Bai and Feng Wu and Xiaoping Chen",
  title =        "Online Planning for Large {Markov} Decision Processes
                 with Hierarchical Decomposition",
  journal =      j-TIST,
  volume =       "6",
  number =       "4",
  pages =        "45:1--45:??",
  month =        aug,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2717316",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Aug 13 17:37:43 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Markov decision processes (MDPs) provide a rich
                 framework for planning under uncertainty. However,
                 exactly solving a large MDP is usually intractable due
                  to the ``curse of dimensionality''---the state space
                 grows exponentially with the number of state variables.
                 Online algorithms tackle this problem by avoiding
                 computing a policy for the entire state space. On the
                  other hand, since an online algorithm has to find a
                 near-optimal action online in almost real time, the
                 computation time is often very limited. In the context
                 of reinforcement learning, MAXQ is a value function
                 decomposition method that exploits the underlying
                 structure of the original MDP and decomposes it into a
                 combination of smaller subproblems arranged over a task
                  hierarchy. In this article, we present MAXQ-OP---a novel
                 online planning algorithm for large MDPs that utilizes
                 MAXQ hierarchical decomposition in online settings.
                 Compared to traditional online planning algorithms,
                  MAXQ-OP is able to reach much deeper states in the
                 search tree with relatively less computation time by
                 exploiting MAXQ hierarchical decomposition online. We
                 empirically evaluate our algorithm in the standard Taxi
                  domain---a common benchmark for MDPs---to show the
                 effectiveness of our approach. We have also conducted a
                 long-term case study in a highly complex simulated
                 soccer domain and developed a team named WrightEagle
                  that has won five world championships and five runners-up
                 in the recent 10 years of RoboCup Soccer Simulation 2D
                 annual competitions. The results in the RoboCup domain
                 confirm the scalability of MAXQ-OP to very large
                 domains.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "45",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Ye:2015:SSB,
  author =       "Yanfang Ye and Tao Li and Haiyin Shen",
  title =        "{Soter}: Smart Bracelets for Children's Safety",
  journal =      j-TIST,
  volume =       "6",
  number =       "4",
  pages =        "46:1--46:??",
  month =        aug,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700483",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Aug 13 17:37:43 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In recent years, crimes against children and cases of
                 missing children have increased at a high rate.
                 Therefore, there is an urgent need for safety support
                 systems to prevent crimes against children or for
                 antiloss, especially when parents are not with their
                 children, such as to and from school. However, existing
                 children's tracking systems are not smart enough to
                 provide the safety supports, as they simply locate the
                 children's positions without offering any notification
                 to parents that their children may be in danger. In
                 addition, there is limited research on children's
                 tracking and their antiloss. In this article, based on
                 location histories, we introduce novel notions of
                 children's life patterns that capture their general
                 lifestyles and regularities, and develop an intelligent
                 data mining framework to learn the safe regions and
                 safe routes of children on the cloud side. When the
                 children may be in danger, their parents will receive
                 automatic notifications from the cloud. We also propose
                 an effective energy-efficient positioning scheme that
                 leverages the location tracking accuracy of the
                 children while keeping energy overhead low by using a
                 hybrid global positioning system and a global system
                 for mobile communications. To the best of our
                 knowledge, this is the first attempt in applying data
                 mining techniques to applications designed for
                 children's safety. Our proposed techniques have been
                 incorporated into Soter, a children's safeguard system
                 that is used to provide cloud service for smart
                 bracelets produced by Qihoo. The case studies on real
                 smart bracelet users of Qihoo demonstrate the
                 effectiveness of our proposed methods and Soter for
                 children's safety.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "46",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Wang:2015:PLL,
  author =       "Yi Wang and Xuemin Zhao and Zhenlong Sun and Hao Yan
                 and Lifeng Wang and Zhihui Jin and Liubin Wang and Yang
                 Gao and Ching Law and Jia Zeng",
  title =        "{Peacock}: Learning Long-Tail Topic Features for
                 Industrial Applications",
  journal =      j-TIST,
  volume =       "6",
  number =       "4",
  pages =        "47:1--47:??",
  month =        aug,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700497",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Aug 13 17:37:43 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Latent Dirichlet allocation (LDA) is a popular topic
                 modeling technique in academia but less so in industry,
                 especially in large-scale applications involving search
                 engine and online advertising systems. A main
                 underlying reason is that the topic models used have
                 been too small in scale to be useful; for example, some
                 of the largest LDA models reported in literature have
                  up to 10$^3$ topics, which can hardly cover the
                 long-tail semantic word sets. In this article, we show
                 that the number of topics is a key factor that can
                 significantly boost the utility of topic-modeling
                 systems. In particular, we show that a ``big'' LDA
                 model with at least 10$^5$ topics inferred from 10$^9$
                 search queries can achieve a significant improvement on
                 industrial search engine and online advertising
                 systems, both of which serve hundreds of millions of
                 users. We develop a novel distributed system called
                 Peacock to learn big LDA models from big data. The main
                 features of Peacock include hierarchical distributed
                 architecture, real-time prediction, and topic
                 de-duplication. We empirically demonstrate that the
                 Peacock system is capable of providing significant
                 benefits via highly scalable LDA topic models for
                 several industrial applications.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "47",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Jumadinova:2015:APM,
  author =       "Janyl Jumadinova and Prithviraj Dasgupta",
  title =        "Automated Pricing in a Multiagent Prediction Market
                 Using a Partially Observable Stochastic Game",
  journal =      j-TIST,
  volume =       "6",
  number =       "4",
  pages =        "48:1--48:??",
  month =        aug,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700488",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Aug 13 17:37:43 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Prediction markets offer an efficient market-based
                 mechanism to aggregate large amounts of dispersed or
                 distributed information from different people to
                 predict the possible outcome of future events.
                 Recently, automated prediction markets where software
                 trading agents perform market operations such as
                 trading and updating beliefs on behalf of humans have
                 been proposed. A challenging aspect in automated
                 prediction markets is to develop suitable techniques
                 that can be used by automated trading agents to update
                 the price at which they should trade securities related
                 to an event so that they can increase their profit.
                 This problem is nontrivial, as the decision to trade
                 and the price at which trading should occur depends on
                 several dynamic factors, such as incoming information
                 related to the event for which the security is being
                 traded, the belief-update mechanism and risk attitude
                 of the trading agent, and the trading decision and
                 trading prices of other agents. To address this
                 problem, we have proposed a new behavior model for
                 trading agents based on a game-theoretic framework
                 called partially observable stochastic game with
                 information (POSGI). We propose a correlated
                 equilibrium (CE)-based solution strategy for this game
                 that allows each agent to dynamically choose an action
                 (to buy or sell or hold) in the prediction market. We
                 have also performed extensive simulation experiments
                 using the data obtained from the Intrade prediction
                 market for four different prediction markets. Our
                 results show that our POSGI model and CE strategy
                  produce prices that are strongly correlated with the
                 prices of the real prediction markets. Results
                 comparing our CE strategy with five other strategies
                  commonly used in similar markets show that our CE
                 strategy improves price predictions and provides higher
                 utilities to the agents compared to other existing
                 strategies.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "48",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Fu:2015:ESG,
  author =       "Hao Fu and Aston Zhang and Xing Xie",
  title =        "Effective Social Graph Deanonymization Based on Graph
                 Structure and Descriptive Information",
  journal =      j-TIST,
  volume =       "6",
  number =       "4",
  pages =        "49:1--49:??",
  month =        aug,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700836",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Aug 13 17:37:43 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The study of online social networks has attracted
                 increasing interest. However, concerns are raised for
                 the privacy risks of user data since they have been
                 frequently shared among researchers, advertisers, and
                 application developers. To solve this problem, a number
                 of anonymization algorithms have been recently
                 developed for protecting the privacy of social graphs.
                 In this article, we proposed a graph node similarity
                 measurement in consideration with both graph structure
                 and descriptive information, and a deanonymization
                 algorithm based on the measurement. Using the proposed
                 algorithm, we evaluated the privacy risks of several
                 typical anonymization algorithms on social graphs with
                 thousands of nodes from Microsoft Academic Search,
                 LiveJournal, and the Enron email dataset, and a social
                 graph with millions of nodes from Tencent Weibo. Our
                 results showed that the proposed algorithm was
                 efficient and effective to deanonymize social graphs
                 without any initial seed mappings. Based on the
                 experiments, we also pointed out suggestions on how to
                 better maintain the data utility while preserving
                 privacy.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "49",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Chen:2015:HIR,
  author =       "Bo-Hao Chen and Shih-Chia Huang and Jian Hui Ye",
  title =        "Hazy Image Restoration by Bi-Histogram Modification",
  journal =      j-TIST,
  volume =       "6",
  number =       "4",
  pages =        "50:1--50:??",
  month =        aug,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2710024",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Aug 13 17:37:43 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Visibility restoration techniques are widely used for
                 information recovery of hazy images in many computer
                 vision applications. Estimation of haze density is an
                 essential task of visibility restoration techniques.
                 However, conventional visibility restoration techniques
                 often suffer from either the generation of serious
                 artifacts or the loss of object information in the
                 restored images due to uneven haze density, which
                 usually means that the images contain heavy haze
                 formation within their background regions and little
                 haze formation within their foreground regions. This
                 frequently occurs when the images feature real-world
                 scenes with a deep depth of field. How to effectively
                 and accurately estimate the haze density in the
                 transmission map for these images is the most
                 challenging aspect of the traditional state-of-the-art
                 techniques. In response to this problem, this work
                 proposes a novel visibility restoration approach that
                 is based on Bi-Histogram modification, and which
                 integrates a haze density estimation module and a haze
                 formation removal module for effective and accurate
                 estimation of haze density in the transmission map. As
                 our experimental results demonstrate, the proposed
                 approach achieves superior visibility restoration
                 efficacy in comparison with the other state-of-the-art
                 approaches based on both qualitative and quantitative
                 evaluations. The proposed approach proves effective and
                 accurate in terms of both background and foreground
                 restoration of various hazy scenarios.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "50",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Combi:2015:IAT,
  author =       "Carlo Combi and Jiming Liu",
  title =        "Introduction to the {ACM TIST} Special Issue on
                 Intelligent Healthcare Informatics",
  journal =      j-TIST,
  volume =       "6",
  number =       "4",
  pages =        "51:1--51:??",
  month =        aug,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2791398",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Aug 13 17:37:43 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "51",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Kim:2015:AAR,
  author =       "Eunju Kim and Sumi Helal and Chris Nugent and Mark
                 Beattie",
  title =        "Analyzing Activity Recognition Uncertainties in Smart
                 Home Environments",
  journal =      j-TIST,
  volume =       "6",
  number =       "4",
  pages =        "52:1--52:??",
  month =        aug,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2651445",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Aug 13 17:37:43 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In spite of the importance of activity recognition
                 (AR) for intelligent human-computer interaction in
                 emerging smart space applications, state-of-the-art AR
                 technology is not ready or adequate for real-world
                 deployments due to its insufficient accuracy. The
                 accuracy limitation is directly attributed to
                 uncertainties stemming from multiple sources in the AR
                 system. Hence, one of the major goals of AR research is
                 to improve system accuracy by minimizing or managing
                 the uncertainties encountered throughout the AR
                 process. As we cannot manage uncertainties well without
                 measuring them, we must first quantify their impact.
                 Nevertheless, such a quantification process is very
                 challenging given that uncertainties come from diverse
                 and heterogeneous sources. In this article, we propose
                 an approach, which can account for multiple uncertainty
                 sources and assess their impact on AR systems. We
                 introduce several metrics to quantify the various
                 uncertainties and their impact. We then conduct a
                 quantitative impact analysis of uncertainties utilizing
                 data collected from actual smart spaces that we have
                 instrumented. The analysis is intended to serve as
                 groundwork for developing ``diagnostic'' accuracy
                 measures of AR systems capable of pinpointing the
                 sources of accuracy loss. This is to be contrasted with
                 the currently used accuracy measures.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "52",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Soto-Mendoza:2015:DPS,
  author =       "Valeria Soto-Mendoza and J. Antonio
                 Garc{\'\i}a-Mac{\'\i}as and Edgar Ch{\'a}vez and Ana I.
                 Mart{\'\i}nez-Garc{\'\i}a and Jes{\'u}s Favela and
                 Patricia Serrano-Alvarado and Mayth{\'e} R.
                 Z{\'u}{\~n}iga Rojas",
  title =        "Design of a Predictive Scheduling System to Improve
                 Assisted Living Services for Elders",
  journal =      j-TIST,
  volume =       "6",
  number =       "4",
  pages =        "53:1--53:??",
  month =        aug,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2736700",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Aug 13 17:37:43 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "As the number of older adults increases, and with it
                 the demand for dedicated care, geriatric residences
                 face a shortage of caregivers, who themselves
                 experience work overload, stress, and burden. We
                 conducted a long-term field study in three geriatric
                 residences to understand the work conditions of
                 caregivers with the aim of developing technologies to
                 assist them in their work and help them deal with their
                 burdens. From this study, we obtained relevant
                 requirements and insights to design, implement, and
                 evaluate two prototypes for supporting caregivers'
                 tasks (e.g., electronic recording and automatic
                 notifications) in order to validate the feasibility of
                 their implementation in situ and their technical
                 requirements. The evaluation in situ of the prototypes
                 was conducted for a period of 4 weeks. The results of
                 the evaluation, together with the data collected from 6
                 months of use, motivated the design of a predictive
                 schedule, which was iteratively improved and evaluated
                 in participative sessions with caregivers. PRESENCE,
                 the predictive schedule we propose, triggers real-time
                 alerts of risky situations (e.g., falls, entering
                 off-limits areas such as the infirmary or the kitchen)
                 and informs caregivers of routine tasks that need to be
                 performed (e.g., medication administration, diaper
                 change, etc.). Moreover, PRESENCE helps caregivers to
                 record caring tasks (such as diaper changes or
                 medication) and well-being assessments (such as the
                 mood) that are difficult to automate. This facilitates
                 caregiver's shift handover and can help to train new
                 caregivers by suggesting routine tasks and by sending
                 reminders and timely information about residents. It
                 can be seen as a tool to reduce the workload of
                 caregivers and medical staff. Instead of trying to
                 substitute the caregiver with an automatic caring
                 system, as proposed by others, we propose our
                 predictive schedule system that blends caregiver
                 assessments and measurements from sensors. We show the
                 feasibility of predicting caregiver tasks and a
                 formative evaluation with caregivers that provides
                 preliminary evidence of its utility.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "53",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Champaign:2015:EPC,
  author =       "John Champaign and Robin Cohen and Disney Yan Lam",
  title =        "Empowering Patients and Caregivers to Manage
                 Healthcare Via Streamlined Presentation of {Web}
                 Objects Selected by Modeling Learning Benefits Obtained
                 by Similar Peers",
  journal =      j-TIST,
  volume =       "6",
  number =       "4",
  pages =        "54:1--54:??",
  month =        aug,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700480",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Aug 13 17:37:43 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In this article, we introduce a framework for
                 selecting web objects (texts, videos, simulations) from
                 a large online repository to present to patients and
                 caregivers, in order to assist in their healthcare.
                 Motivated by the paradigm of peer-based intelligent
                 tutoring, we model the learning gains achieved by users
                 when exposed to specific web objects in order to
                 recommend those objects most likely to deliver benefit
                 to new users. We are able to show that this streamlined
                 presentation leads to effective knowledge gains, both
                 through a process of simulated learning and through a
                 user study, for the specific application of caring for
                 children with autism. The value of our framework for
                 peer-driven content selection of health information is
                 emphasized through two additional roles for peers:
                 attaching commentary to web objects and proposing
                 subdivided objects for presentation, both of which are
                 demonstrated to deliver effective learning gains, in
                 simulations. In all, we are offering an opportunity for
                 patients to navigate the deep waters of excessive
                 online information towards effective management of
                 healthcare, through content selection influenced by
                 previous peer experiences.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "54",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Yang:2015:UHC,
  author =       "Haodong Yang and Christopher C. Yang",
  title =        "Using Health-Consumer-Contributed Data to Detect
                 Adverse Drug Reactions by Association Mining with
                 Temporal Analysis",
  journal =      j-TIST,
  volume =       "6",
  number =       "4",
  pages =        "55:1--55:??",
  month =        aug,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700482",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Aug 13 17:37:43 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Since adverse drug reactions (ADRs) represent a
                 significant health problem all over the world, ADR
                 detection has become an important research topic in
                 drug safety surveillance. As many potential ADRs cannot
                 be detected though premarketing review, drug safety
                 currently depends heavily on postmarketing
                 surveillance. Particularly, current postmarketing
                 surveillance in the United States primarily relies on
                 the FDA Adverse Event Reporting System (FAERS).
                 However, the effectiveness of such spontaneous
                 reporting systems for ADR detection is not as good as
                 expected because of the extremely high underreporting
                 ratio of ADRs. Moreover, it often takes the FDA years
                 to complete the whole process of collecting reports,
                 investigating cases, and releasing alerts. Given the
                 prosperity of social media, many online health
                 communities are publicly available for health consumers
                 to share and discuss any healthcare experience such as
                 ADRs they are suffering. Such
                 health-consumer-contributed content is timely and
                 informative, but this data source still remains
                 untapped for postmarketing drug safety surveillance. In
                 this study, we propose to use (1) association mining to
                 identify the relations between a drug and an ADR and
                 (2) temporal analysis to detect drug safety signals at
                 the early stage. We collect data from MedHelp and use
                 the FDA's alerts and information of drug labeling
                 revision as the gold standard to evaluate the
                 effectiveness of our approach. The experiment results
                 show that health-related social media is a promising
                 source for ADR detection, and our proposed techniques
                 are effective to identify early ADR signals.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "55",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Ullah:2015:ERL,
  author =       "Md Zia Ullah and Masaki Aono and Md Hanif Seddiqui",
  title =        "Estimating a Ranked List of Human Genetic Diseases by
                 Associating Phenotype-Gene with Gene-Disease Bipartite
                 Graphs",
  journal =      j-TIST,
  volume =       "6",
  number =       "4",
  pages =        "56:1--56:??",
  month =        aug,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700487",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Aug 13 17:37:43 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "With vast amounts of medical knowledge available on
                 the Internet, it is becoming increasingly practical to
                 help doctors in clinical diagnostics by suggesting
                 plausible diseases predicted by applying data and text
                 mining technologies. Recently, Genome-Wide Association
                 Studies (GWAS) have proved useful as a method for
                 exploring phenotypic associations with diseases.
                 However, since genetic diseases are difficult to
                 diagnose because of their low prevalence, large number,
                 and broad diversity of symptoms, genetic disease
                 patients are often misdiagnosed or experience long
                 diagnostic delays. In this article, we propose a method
                 for ranking genetic diseases for a set of clinical
                 phenotypes. In this regard, we associate a
                 phenotype-gene bipartite graph (PGBG) with a
                 gene-disease bipartite graph (GDBG) by producing a
                 phenotype-disease bipartite graph (PDBG), and we
                 estimate the candidate weights of diseases. In our
                 approach, all paths from a phenotype to a disease are
                 explored by considering causative genes to assign a
                 weight based on path frequency, and the phenotype is
                 linked to the disease in a new PDBG. We introduce the
                 Bidirectionally induced Importance Weight (BIW)
                 prediction method to PDBG for approximating the weights
                 of the edges of diseases with phenotypes by considering
                 link information from both sides of the bipartite
                 graph. The performance of our system is compared to
                 that of other known related systems by estimating
                 Normalized Discounted Cumulative Gain (NDCG), Mean
                 Average Precision (MAP), and Kendall's tau metrics.
                 Further experiments are conducted with well-known TF
                 $\cdot$ IDF, BM25, and Jensen-Shannon divergence as
                 baselines. The result shows that our proposed method
                 outperforms the known related tool Phenomizer in terms
                 of NDCG@10, NDCG@20, MAP@10, and MAP@20; however, it
                 performs worse than Phenomizer in terms of Kendall's
                 tau-b metric at the top-10 ranks. It also turns out
                 that our proposed method has overall better performance
                 than the baseline methods.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "56",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Antonelli:2015:MCM,
  author =       "Dario Antonelli and Elena Baralis and Giulia Bruno and
                 Luca Cagliero and Tania Cerquitelli and Silvia Chiusano
                 and Paolo Garza and Naeem A. Mahoto",
  title =        "{MeTA}: Characterization of Medical Treatments at
                 Different Abstraction Levels",
  journal =      j-TIST,
  volume =       "6",
  number =       "4",
  pages =        "57:1--57:??",
  month =        aug,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700479",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Aug 13 17:37:43 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Physicians and health care organizations always
                 collect large amounts of data during patient care.
                 These large and high-dimensional datasets are usually
                 characterized by an inherent sparseness. Hence,
                 analyzing these datasets to figure out interesting and
                 hidden knowledge is a challenging task. This article
                 proposes a new data mining framework based on
                 generalized association rules to discover
                 multiple-level correlations among patient data.
                 Specifically, correlations among prescribed
                 examinations, drugs, and patient profiles are
                 discovered and analyzed at different abstraction
                 levels. The rule extraction process is driven by a
                 taxonomy to generalize examinations and drugs into
                 their corresponding categories. To ease the manual
                 inspection of the result, a worthwhile subset of rules
                 (i.e., nonredundant generalized rules) is considered.
                 Furthermore, rules are classified according to the
                 involved data features (medical treatments or patient
                 profiles) and then explored in a top-down fashion: from
                 the small subset of high-level rules, a drill-down is
                 performed to target more specific rules. The
                 experiments, performed on a real diabetic patient
                 dataset, demonstrate the effectiveness of the proposed
                 approach in discovering interesting rule groups at
                 different abstraction levels.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "57",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Motai:2015:SCD,
  author =       "Yuichi Motai and Dingkun Ma and Alen Docef and
                 Hiroyuki Yoshida",
  title =        "Smart Colonography for Distributed Medical Databases
                 with Group Kernel Feature Analysis",
  journal =      j-TIST,
  volume =       "6",
  number =       "4",
  pages =        "58:1--58:??",
  month =        aug,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2668136",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Aug 13 17:37:43 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Computer-Aided Detection (CAD) of polyps in Computed
                 Tomographic (CT) colonography is currently very limited
                 since a single database at each hospital/institution
                 doesn't provide sufficient data for training the CAD
                 system's classification algorithm. To address this
                 limitation, we propose to use multiple databases
                 (e.g., big data studies) to create multiple
                 institution-wide databases using distributed computing
                 technologies, which we call smart colonography. Smart
                 colonography may be built by a larger colonography
                 database networked through the participation of
                 multiple institutions via distributed computing. The
                 motivation herein is to create a distributed database
                 that increases the detection accuracy of CAD diagnosis
                 by covering many true-positive cases. Colonography data
                 analysis is mutually accessible to increase the
                 availability of resources so that the knowledge of
                 radiologists is enhanced. In this article, we propose a
                 scalable and efficient algorithm called Group Kernel
                 Feature Analysis (GKFA), which can be applied to
                 multiple cancer databases so that the overall
                 performance of CAD is improved. The key idea behind the
                 proposed GKFA method is to allow the feature space to
                 be updated as the training proceeds with more data
                 being fed from other institutions into the algorithm.
                 Experimental results show that GKFA achieves very good
                 classification accuracy.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "58",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Kim:2015:RPR,
  author =       "Mi-Young Kim and Ying Xu and Osmar R. Za{\"\i}ane and Randy
                 Goebel",
  title =        "Recognition of Patient-Related Named Entities in Noisy
                 Tele-Health Texts",
  journal =      j-TIST,
  volume =       "6",
  number =       "4",
  pages =        "59:1--59:??",
  month =        aug,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2651444",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Thu Aug 13 17:37:43 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "We explore methods for effectively extracting
                 information from clinical narratives that are captured
                 in a public health consulting phone service called
                 HealthLink. Our research investigates the application
                 of state-of-the-art natural language processing and
                 machine learning to clinical narratives to extract
                 information of interest. The currently available data
                 consist of dialogues constructed by nurses while
                 consulting patients by phone. Since the data are
                 interviews transcribed by nurses during phone
                 conversations, they include a significant volume and
                 variety of noise. When we extract the patient-related
                 information from the noisy data, we have to remove or
                 correct at least two kinds of noise: explicit noise,
                 which includes spelling errors, unfinished sentences,
                 omission of sentence delimiters, and variants of terms,
                 and implicit noise, which includes non-patient
                 information and patient's untrustworthy information. To
                 filter explicit noise, we propose our own biomedical
                 term detection/normalization method: it resolves
                 misspelling, term variations, and arbitrary
                 abbreviation of terms by nurses. In detecting temporal
                 terms, temperature, and other types of named entities
                 (which show patients' personal information such as age
                 and sex), we propose a bootstrapping-based pattern
                 learning process to detect a variety of arbitrary
                 variations of named entities. To address implicit
                 noise, we propose a dependency path-based filtering
                 method. The result of our denoising is the extraction
                 of normalized patient information, and we visualize the
                 named entities by constructing a graph that shows the
                 relations between named entities. The objective of this
                 knowledge discovery task is to identify associations
                 between biomedical terms and to clearly expose the
                 trends of patients' symptoms and concern; the
                 experimental results show that we achieve reasonable
                 performance with our noise reduction methods.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "59",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Ding:2015:LRN,
  author =       "Wenkui Ding and Xiubo Geng and Xu-Dong Zhang",
  title =        "Learning to Rank from Noisy Data",
  journal =      j-TIST,
  volume =       "7",
  number =       "1",
  pages =        "1:1--1:??",
  month =        oct,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2576230",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Learning to rank, which learns the ranking function
                 from training data, has become an emerging research
                 area in information retrieval and machine learning.
                 Most existing work on learning to rank assumes that the
                 training data is clean, which is not always true,
                 however. The ambiguity of query intent, the lack of
                 domain knowledge, and the vague definition of relevance
                 levels all make it difficult for common annotators to
                 give reliable relevance labels to some documents. As a
                 result, the relevance labels in the training data of
                 learning to rank usually contain noise. If we ignore
                 this fact, the performance of learning-to-rank
                 algorithms will be damaged. In this article, we propose
                 considering the labeling noise in the process of
                 learning to rank and using a two-step approach to
                 extend existing algorithms to handle noisy training
                 data. In the first step, we estimate the degree of
                 labeling noise for a training document. To this end, we
                 assume that the majority of the relevance labels in the
                 training data are reliable and we use a graphical model
                 to describe the generative process of a training query,
                 the feature vectors of its associated documents, and
                 the relevance labels of these documents. The parameters
                 in the graphical model are learned by means of maximum
                 likelihood estimation. Then the conditional probability
                 of the relevance label given the feature vector of a
                 document is computed. If the probability is large, we
                 regard the degree of labeling noise for this document
                 as small; otherwise, we regard the degree as large. In
                 the second step, we extend existing learning-to-rank
                 algorithms by incorporating the estimated degree of
                 labeling noise into their loss functions. Specifically,
                 we give larger weights to those training documents with
                 smaller degrees of labeling noise and smaller weights
                 to those with larger degrees of labeling noise. As
                 examples, we demonstrate the extensions for McRank,
                 RankSVM, RankBoost, and RankNet. Empirical results on
                 benchmark datasets show that the proposed approach can
                 effectively distinguish noisy documents from clean
                 ones, and the extended learning-to-rank algorithms can
                 achieve better performances than baselines.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "1",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Liu:2015:LSB,
  author =       "Fan Liu and Jinhui Tang and Yan Song and Liyan Zhang
                 and Zhenmin Tang",
  title =        "Local Structure-Based Sparse Representation for Face
                 Recognition",
  journal =      j-TIST,
  volume =       "7",
  number =       "1",
  pages =        "2:1--2:??",
  month =        oct,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2733383",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "This article presents a simple yet effective face
                 recognition method, called local structure-based sparse
                 representation classification (LS\_SRC). Motivated by
                 the ``divide-and-conquer'' strategy, we first divide
                 the face into local blocks and classify each local
                 block, then integrate all the classification results to
                 make the final decision. To classify each local block,
                 we further divide each block into several overlapped
                 local patches and assume that these local patches lie
                 in a linear subspace. This subspace assumption reflects
                 the local structure relationship of the overlapped
                 patches, making sparse representation-based
                 classification (SRC) feasible even when encountering
                 the single-sample-per-person (SSPP) problem. To lighten
                 the computing burden of LS\_SRC, we further propose the
                 local structure-based collaborative representation
                 classification (LS\_CRC). Moreover, the performance of
                 LS\_SRC and LS\_CRC can be further improved by using
                 the confusion matrix of the classifier. Experimental
                 results on four public face databases show that our
                 methods not only generalize well to SSPP problem but
                 also have strong robustness to occlusion; little pose
                 variation; and the variations of expression,
                 illumination, and time.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "2",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Groves:2015:OAT,
  author =       "William Groves and Maria Gini",
  title =        "On Optimizing Airline Ticket Purchase Timing",
  journal =      j-TIST,
  volume =       "7",
  number =       "1",
  pages =        "3:1--3:??",
  month =        oct,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2733384",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Proper timing of the purchase of airline tickets is
                 difficult even when historical ticket prices and some
                 domain knowledge are available. To address this
                 problem, we introduce an algorithm that optimizes
                 purchase timing on behalf of customers and provides
                 performance estimates of its computed action policy.
                 Given a desired flight route and travel date, the
                 algorithm uses machine-learning methods on recent
                 ticket price quotes from many competing airlines to
                 predict the future expected minimum price of all
                 available flights. The main novelty of our algorithm
                 lies in using a systematic feature-selection technique,
                 which captures time dependencies in the data by using
                 time-delayed features, and reduces the number of
                 features by imposing a class hierarchy among the raw
                 features and pruning the features based on in-situ
                 performance. Our algorithm achieves much closer to the
                 optimal purchase policy than other existing decision
                 theoretic approaches for this domain, and meets or
                 exceeds the performance of existing feature-selection
                 methods from the literature. Applications of our
                 feature-selection process to other domains are also
                 discussed.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "3",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Dong:2015:NMR,
  author =       "Yongsheng Dong and Dacheng Tao and Xuelong Li",
  title =        "Nonnegative Multiresolution Representation-Based
                 Texture Image Classification",
  journal =      j-TIST,
  volume =       "7",
  number =       "1",
  pages =        "4:1--4:??",
  month =        oct,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2738050",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Effective representation of image texture is important
                 for an image-classification task. Statistical modelling
                 in wavelet domains has been widely used to image
                 texture representation. However, due to the intraclass
                 complexity and interclass diversity of textures, it is
                 hard to use a predefined probability distribution
                 function to fit adaptively all wavelet subband
                 coefficients of different textures. In this article, we
                 propose a novel modelling approach, Heterogeneous and
                 Incrementally Generated Histogram (HIGH), to indirectly
                 model the wavelet coefficients by use of four local
                 features in wavelet subbands. By concatenating all the
                 HIGHs in all wavelet subbands of a texture, we can
                 construct a nonnegative multiresolution vector (NMV) to
                 represent a texture image. Considering the NMV's high
                 dimensionality and nonnegativity, we further propose a
                 Hessian regularized discriminative nonnegative matrix
                 factorization to compute a low-dimensional basis of the
                 linear subspace of NMVs. Finally, we present a texture
                 classification approach by projecting NMVs on the
                 low-dimensional basis. Experimental results show that
                 our proposed texture classification method outperforms
                 seven representative approaches.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "4",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Chen:2015:MKM,
  author =       "Bowei Chen and Jun Wang and Ingemar J. Cox and Mohan
                 S. Kankanhalli",
  title =        "Multi-Keyword Multi-Click Advertisement Option
                 Contracts for Sponsored Search",
  journal =      j-TIST,
  volume =       "7",
  number =       "1",
  pages =        "5:1--5:??",
  month =        oct,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2743027",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In sponsored search, advertisement (abbreviated ad)
                 slots are usually sold by a search engine to an
                 advertiser through an auction mechanism in which
                 advertisers bid on keywords. In theory, auction
                 mechanisms have many desirable economic properties.
                 However, keyword auctions have a number of limitations
                 including: the uncertainty in payment prices for
                 advertisers; the volatility in the search engine's
                 revenue; and the weak loyalty between advertiser and
                 search engine. In this article, we propose a special ad
                 option that alleviates these problems. In our proposal,
                 an advertiser can purchase an option from a search
                 engine in advance by paying an upfront fee, known as
                 the option price. The advertiser then has the right,
                 but no obligation, to purchase among the prespecified
                 set of keywords at the fixed cost-per-clicks (CPCs) for
                 a specified number of clicks in a specified period of
                 time. The proposed option is closely related to a
                 special exotic option in finance that contains multiple
                 underlying assets (multi-keyword) and is also
                 multi-exercisable (multi-click). This novel structure
                 has many benefits: advertisers can have reduced
                 uncertainty in advertising; the search engine can
                 improve the advertisers' loyalty as well as obtain a
                 stable and increased expected revenue over time. Since
                 the proposed ad option can be implemented in
                 conjunction with the existing keyword auctions, the
                 option price and corresponding fixed CPCs must be set
                 such that there is no arbitrage between the two
                 markets. Option pricing methods are discussed and our
                 experimental results validate the development. Compared
                 to keyword auctions, a search engine can have an
                 increased expected revenue by selling an ad option.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "5",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Font:2015:AIT,
  author =       "Frederic Font and Joan Serr{\`a} and Xavier Serra",
  title =        "Analysis of the Impact of a Tag Recommendation System
                 in a Real-World Folksonomy",
  journal =      j-TIST,
  volume =       "7",
  number =       "1",
  pages =        "6:1--6:??",
  month =        oct,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2743026",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Collaborative tagging systems have emerged as a
                 successful solution for annotating contributed
                 resources to online sharing platforms, facilitating
                 searching, browsing, and organizing their contents. To
                 aid users in the annotation process, several tag
                 recommendation methods have been proposed. It has been
                 repeatedly hypothesized that these methods should
                 contribute to improving annotation quality and reducing
                 the cost of the annotation process. It has been also
                 hypothesized that these methods should contribute to
                 the consolidation of the vocabulary of collaborative
                 tagging systems. However, to date, no empirical and
                 quantitative result supports these hypotheses. In this
                 work, we deeply analyze the impact of a tag
                 recommendation system in the folksonomy of Freesound, a
                 real-world and large-scale online sound sharing
                 platform. Our results suggest that tag recommendation
                 effectively increases vocabulary sharing among users of
                 the platform. In addition, tag recommendation is shown
                 to contribute to the convergence of the vocabulary as
                 well as to a partial increase in the quality of
                 annotations. However, according to our analysis, the
                 cost of the annotation process does not seem to be
                 effectively reduced. Our work is relevant to increase
                 our understanding about the nature of tag
                 recommendation systems and points to future directions
                 for the further development of those systems and their
                 analysis.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "6",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Cheng:2015:HBS,
  author =       "Fan-Chieh Cheng and Bo-Hao Chen and Shih-Chia Huang",
  title =        "A Hybrid Background Subtraction Method with Background
                 and Foreground Candidates Detection",
  journal =      j-TIST,
  volume =       "7",
  number =       "1",
  pages =        "7:1--7:??",
  month =        oct,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2746409",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Background subtraction for motion detection is often
                 used in video surveillance systems. However,
                 difficulties in bootstrapping restrict its development.
                 This article proposes a novel hybrid background
                 subtraction technique to solve this problem. For
                 performance improvement of background subtraction, the
                 proposed technique not only quickly initializes the
                 background model but also eliminates unnecessary
                 regions containing only background pixels in the object
                 detection process. Furthermore, an embodiment based on
                 the proposed technique is also presented. Experimental
                 results verify that the proposed technique allows for
                 reduced execution time as well as improvement of
                 performance as evaluated by Recall, Precision, F1, and
                 Similarity metrics when used with state-of-the-art
                 background subtraction methods.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "7",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Muntean:2015:LPM,
  author =       "Cristina Ioana Muntean and Franco Maria Nardini and
                 Fabrizio Silvestri and Ranieri Baraglia",
  title =        "On Learning Prediction Models for Tourists Paths",
  journal =      j-TIST,
  volume =       "7",
  number =       "1",
  pages =        "8:1--8:??",
  month =        oct,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2766459",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In this article, we tackle the problem of predicting
                 the ``next'' geographical position of a tourist, given
                 her history (i.e., the prediction is done accordingly
                 to the tourist's current trail) by means of supervised
                 learning techniques, namely Gradient Boosted Regression
                 Trees and Ranking SVM. The learning is done on the
                 basis of an object space represented by a 68-dimension
                 feature vector specifically designed for
                 tourism-related data. Furthermore, we propose a
                 thorough comparison of several methods that are
                 considered state-of-the-art in recommender and trail
                 prediction systems for tourism, as well as a popularity
                 baseline. Experiments show that the methods we propose
                 consistently outperform the baselines and provide
                 strong evidence of the performance and robustness of
                 our solutions.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "8",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Wang:2015:WHP,
  author =       "Yinting Wang and Mingli Song and Dacheng Tao and Yong
                 Rui and Jiajun Bu and Ah Chung Tsoi and Shaojie Zhuo
                 and Ping Tan",
  title =        "{Where2Stand}: a Human Position Recommendation System
                 for Souvenir Photography",
  journal =      j-TIST,
  volume =       "7",
  number =       "1",
  pages =        "9:1--9:??",
  month =        oct,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2770879",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "People often take photographs at tourist sites and
                 these pictures usually have two main elements: a person
                 in the foreground and scenery in the background. This
                 type of ``souvenir photo'' is one of the most common
                 photos clicked by tourists. Although algorithms that
                 aid a user-photographer in taking a well-composed
                 picture of a scene exist [Ni et al. 2013], few studies
                 have addressed the issue of properly positioning human
                 subjects in photographs. In photography, the common
                 guidelines of composing portrait images exist. However,
                 these rules usually do not consider the background
                 scene. Therefore, in this article, we investigate
                 human-scenery positional relationships and construct a
                 photographic assistance system to optimize the position
                 of human subjects in a given background scene, thereby
                 assisting the user in capturing high-quality souvenir
                 photos. We collect thousands of well-composed portrait
                 photographs to learn human-scenery aesthetic
                 composition rules. In addition, we define a set of
                 negative rules to exclude undesirable compositions.
                 Recommendation results are achieved by combining the
                 first learned positive rule with our proposed negative
                 rules. We implement the proposed system on an Android
                 platform in a smartphone. The system demonstrates its
                 efficacy by producing well-composed souvenir photos.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "9",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Hennes:2015:MLS,
  author =       "Daniel Hennes and Steven {De Jong} and Karl Tuyls and
                 Ya'akov (Kobi) Gal",
  title =        "Metastrategies in Large-Scale Bargaining Settings",
  journal =      j-TIST,
  volume =       "7",
  number =       "1",
  pages =        "10:1--10:??",
  month =        oct,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2774224",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "This article presents novel methods for representing
                 and analyzing a special class of multiagent bargaining
                 settings that feature multiple players, large action
                 spaces, and a relationship among players' goals, tasks,
                 and resources. We show how to reduce these interactions
                 to a set of bilateral normal-form games in which the
                 strategy space is significantly smaller than the
                 original settings while still preserving much of their
                 structural relationship. The method is demonstrated
                 using the Colored Trails (CT) framework, which
                 encompasses a broad family of games and has been used
                 in many past studies. We define a set of heuristics
                 (metastrategies) in multiplayer CT games that make
                 varying assumptions about players' strategies, such as
                 boundedly rational play and social preferences. We show
                 how these CT settings can be decomposed into canonical
                 bilateral games such as the Prisoners' Dilemma, Stag
                 Hunt, and Ultimatum games in a way that significantly
                 facilitates their analysis. We demonstrate the
                 feasibility of this approach in separate CT settings
                 involving one-shot and repeated bargaining scenarios,
                 which are subsequently analyzed using evolutionary
                 game-theoretic techniques. We provide a set of
                 necessary conditions for CT games for allowing this
                 decomposition. Our results have significance for
                 multiagent systems researchers in mapping large
                 multiplayer CT task settings to smaller, well-known
                 bilateral normal-form games while preserving some of
                 the structure of the original setting.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "10",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2015:SSI,
  author =       "Jia-Dong Zhang and Chi-Yin Chow",
  title =        "Spatiotemporal Sequential Influence Modeling for
                 Location Recommendations: a Gravity-based Approach",
  journal =      j-TIST,
  volume =       "7",
  number =       "1",
  pages =        "11:1--11:??",
  month =        oct,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2786761",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Recommending to users personalized locations is an
                 important feature of Location-Based Social Networks
                 (LBSNs), which benefits users who wish to explore new
                 places and businesses to discover potential customers.
                 In LBSNs, social and geographical influences have been
                 intensively used in location recommendations. However,
                 human movement also exhibits spatiotemporal sequential
                 patterns, but only a few current studies consider the
                 spatiotemporal sequential influence of locations on
                 users' check-in behaviors. In this article, we propose
                 a new gravity model for location recommendations,
                 called LORE, to exploit the spatiotemporal sequential
                 influence on location recommendations. First, LORE
                 extracts sequential patterns from historical check-in
                 location sequences of all users as a Location-Location
                  Transition Graph (L$^2$TG), and utilizes the L$^2$TG
                 to predict the probability of a user visiting a new
                 location through the developed additive Markov chain
                 that considers the effect of all visited locations in
                 the check-in history of the user on the new location.
                 Furthermore, LORE applies our contrived gravity model
                 to weigh the effect of each visited location on the new
                 location derived from the personalized attractive force
                 (i.e., the weight) between the visited location and the
                 new location. The gravity model effectively integrates
                 the spatiotemporal, social, and popularity influences
                 by estimating a power-law distribution based on (i) the
                 spatial distance and temporal difference between two
                 consecutive check-in locations of the same user, (ii)
                 the check-in frequency of social friends, and (iii) the
                 popularity of locations from all users. Finally, we
                 conduct a comprehensive performance evaluation for LORE
                 using three large-scale real-world datasets collected
                 from Foursquare, Gowalla, and Brightkite. Experimental
                 results show that LORE achieves significantly superior
                 location recommendations compared to other
                 state-of-the-art location recommendation techniques.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "11",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Guan:2015:DML,
  author =       "Tao Guan and Yuesong Wang and Liya Duan and Rongrong
                 Ji",
  title =        "On-Device Mobile Landmark Recognition Using Binarized
                 Descriptor with Multifeature Fusion",
  journal =      j-TIST,
  volume =       "7",
  number =       "1",
  pages =        "12:1--12:??",
  month =        oct,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2795234",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Along with the exponential growth of high-performance
                 mobile devices, on-device Mobile Landmark Recognition
                 (MLR) has recently attracted increasing research
                 attention. However, the latency and accuracy of
                 automatic recognition remain as bottlenecks against its
                 real-world usage. In this article, we introduce a novel
                 framework that combines interactive image segmentation
                 with multifeature fusion to achieve improved MLR with
                 high accuracy. First, we propose an effective vector
                 binarization method to reduce the memory usage of image
                 descriptors extracted on-device, which maintains
                 comparable recognition accuracy to the original
                 descriptors. Second, we design a location-aware fusion
                 algorithm that can fuse multiple visual features into a
                 compact yet discriminative image descriptor to improve
                 on-device efficiency. Third, a user-friendly
                 interaction scheme is developed that enables
                 interactive foreground/background segmentation to
                 largely improve recognition accuracy. Experimental
                 results demonstrate the effectiveness of the proposed
                 algorithms for on-device MLR applications.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "12",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2016:EFC,
  author =       "Kun Zhang and Zhikun Wang and Jiji Zhang and Bernhard
                 Sch{\"o}lkopf",
  title =        "On Estimation of Functional Causal Models: General
                 Results and Application to the Post-Nonlinear Causal
                 Model",
  journal =      j-TIST,
  volume =       "7",
  number =       "2",
  pages =        "13:1--13:??",
  month =        jan,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700476",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Compared to constraint-based causal discovery, causal
                 discovery based on functional causal models is able to
                 identify the whole causal model under appropriate
                 assumptions [Shimizu et al. 2006; Hoyer et al. 2009;
                 Zhang and Hyv{\"a}rinen 2009b]. Functional causal
                 models represent the effect as a function of the direct
                 causes together with an independent noise term.
                 Examples include the linear non-Gaussian acyclic model
                 (LiNGAM), nonlinear additive noise model, and
                 post-nonlinear (PNL) model. Currently, there are two
                 ways to estimate the parameters in the models:
                 dependence minimization and maximum likelihood. In this
                 article, we show that for any acyclic functional causal
                 model, minimizing the mutual information between the
                 hypothetical cause and the noise term is equivalent to
                 maximizing the data likelihood with a flexible model
                 for the distribution of the noise term. We then focus
                 on estimation of the PNL causal model and propose to
                 estimate it with the warped Gaussian process with the
                 noise modeled by the mixture of Gaussians. As a
                 Bayesian nonparametric approach, it outperforms the
                 previous one based on mutual information minimization
                 with nonlinear functions represented by multilayer
                 perceptrons; we also show that unlike the ordinary
                 regression, estimation results of the PNL causal model
                 are sensitive to the assumption on the noise
                 distribution. Experimental results on both synthetic
                 and real data support our theoretical claims.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "13",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Li:2016:OSC,
  author =       "Jiuyong Li and Thuc Duy Le and Lin Liu and Jixue Liu
                 and Zhou Jin and Bingyu Sun and Saisai Ma",
  title =        "From Observational Studies to Causal Rule Mining",
  journal =      j-TIST,
  volume =       "7",
  number =       "2",
  pages =        "14:1--14:??",
  month =        jan,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2746410",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Randomised controlled trials (RCTs) are the most
                 effective approach to causal discovery, but in many
                 circumstances it is impossible to conduct RCTs.
                 Therefore, observational studies based on passively
                 observed data are widely accepted as an alternative to
                 RCTs. However, in observational studies, prior
                 knowledge is required to generate the hypotheses about
                 the cause-effect relationships to be tested, and hence
                 they can only be applied to problems with available
                 domain knowledge and a handful of variables. In
                 practice, many datasets are of high dimensionality,
                 which leaves observational studies out of the
                 opportunities for causal discovery from such a wealth
                 of data sources. In another direction, many efficient
                 data mining methods have been developed to identify
                 associations among variables in large datasets. The
                 problem is that causal relationships imply
                 associations, but the reverse is not always true.
                 However, we can see the synergy between the two
                 paradigms here. Specifically, association rule mining
                 can be used to deal with the high-dimensionality
                 problem, whereas observational studies can be utilised
                 to eliminate noncausal associations. In this article,
                 we propose the concept of causal rules (CRs) and
                 develop an algorithm for mining CRs in large datasets.
                 We use the idea of retrospective cohort studies to
                 detect CRs based on the results of association rule
                 mining. Experiments with both synthetic and real-world
                 datasets have demonstrated the effectiveness and
                 efficiency of CR mining. In comparison with the
                 commonly used causal discovery methods, the proposed
                 approach generally is faster and has better or
                 competitive performance in finding correct or sensible
                 causes. It is also capable of finding a cause
                  consisting of multiple variables---a feature that other
                 causal discovery methods do not possess.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "14",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Leiva:2016:GGG,
  author =       "Luis A. Leiva and Daniel Mart{\'\i}n-Albo and
                 R{\'e}jean Plamondon",
  title =        "Gestures {\`a} Go Go: Authoring Synthetic Human-Like
                 Stroke Gestures Using the Kinematic Theory of Rapid
                 Movements",
  journal =      j-TIST,
  volume =       "7",
  number =       "2",
  pages =        "15:1--15:??",
  month =        jan,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2799648",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Training a high-quality gesture recognizer requires
                 providing a large number of examples to enable good
                 performance on unseen, future data. However, recruiting
                 participants, data collection, and labeling, etc.,
                 necessary for achieving this goal are usually time
                 consuming and expensive. Thus, it is important to
                 investigate how to empower developers to quickly
                 collect gesture samples for improving UI usage and user
                 experience. In response to this need, we introduce
                  Gestures {\`a} Go Go (g3), a web service plus an
                 accompanying web application for bootstrapping stroke
                 gesture samples based on the kinematic theory of rapid
                 human movements. The user only has to provide a gesture
                 example once, and g3 will create a model of that
                 gesture. Then, by introducing local and global
                 perturbations to the model parameters, g3 generates
                 from tens to thousands of synthetic human-like samples.
                 Through a comprehensive evaluation, we show that
                 synthesized gestures perform equally similar to
                 gestures generated by human users. Ultimately, this
                 work informs our understanding of designing better user
                 interfaces that are driven by gestures.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "15",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Minkov:2016:EEU,
  author =       "Einat Minkov",
  title =        "Event Extraction using Structured Learning and Rich
                 Domain Knowledge: Application across Domains and Data
                 Sources",
  journal =      j-TIST,
  volume =       "7",
  number =       "2",
  pages =        "16:1--16:??",
  month =        jan,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2801131",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "We consider the task of record extraction from text
                 documents, where the goal is to automatically populate
                 the fields of target relations, such as scientific
                 seminars or corporate acquisition events. There are
                 various inferences involved in the record-extraction
                 process, including mention detection, unification, and
                 field assignments. We use structured learning to find
                 the appropriate field-value assignments. Unlike
                 previous works, the proposed approach generates
                 feature-rich models that enable the modeling of domain
                 semantics and structural coherence at all levels and
                 across fields. Given labeled examples, such an approach
                 can, for instance, learn likely event durations and the
                 fact that start times should come before end times.
                 While the inference space is large, effective learning
                 is achieved using a perceptron-style method and simple,
                 greedy beam decoding. A main focus of this article is
                 on practical aspects involved in implementing the
                 proposed framework for real-world applications. We
                 argue and demonstrate that this approach is favorable
                 in conditions of data shift, a real-world setting in
                 which models learned using a limited set of labeled
                 examples are applied to examples drawn from a different
                 data distribution. Much of the framework's robustness
                 is attributed to the modeling of domain knowledge. We
                 describe design and implementation details for the case
                 study of seminar event extraction from email
                 announcements, and discuss design adaptations across
                 different domains and text genres.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "16",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Zhang:2016:PAT,
  author =       "Kun Zhang and Jiuyong Li and Elias Bareinboim and
                 Bernhard Sch{\"o}lkopf and Judea Pearl",
  title =        "Preface to the {ACM TIST} Special Issue on Causal
                 Discovery and Inference",
  journal =      j-TIST,
  volume =       "7",
  number =       "2",
  pages =        "17:1--17:??",
  month =        jan,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2840720",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "17",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Shan:2016:SBS,
  author =       "Na Shan and Xiaogang Dong and Pingfeng Xu and Jianhua
                 Guo",
  title =        "Sharp Bounds on Survivor Average Causal Effects When
                 the Outcome Is Binary and Truncated by Death",
  journal =      j-TIST,
  volume =       "7",
  number =       "2",
  pages =        "18:1--18:??",
  month =        jan,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700498",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In randomized trials with follow-up, outcomes may be
                 undefined for individuals who die before the follow-up
                 is complete. In such settings, Frangakis and Rubin
                 [2002] proposed the ``principal stratum effect'' or
                 ``Survivor Average Causal Effect'' (SACE), which is a
                 fair treatment comparison in the subpopulation that
                 would have survived under either treatment arm. Many of
                 the existing results for estimating the SACE are
                 difficult to carry out in practice. In this article,
                 when the outcome is binary, we apply the symbolic
                 Balke-Pearl linear programming method to derive simple
                 formulas for the sharp bounds on the SACE under the
                 monotonicity assumption commonly used by many
                 researchers.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "18",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Chen:2016:SIC,
  author =       "Hua Chen and Peng Ding and Zhi Geng and Xiao-Hua
                 Zhou",
  title =        "Semiparametric Inference of the Complier Average
                 Causal Effect with Nonignorable Missing Outcomes",
  journal =      j-TIST,
  volume =       "7",
  number =       "2",
  pages =        "19:1--19:??",
  month =        jan,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2668135",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Noncompliance and missing data often occur in
                 randomized trials, which complicate the inference of
                 causal effects. When both noncompliance and missing
                 data are present, previous papers proposed moment and
                 maximum likelihood estimators for binary and normally
                 distributed continuous outcomes under the latent
                 ignorable missing data mechanism. However, the latent
                 ignorable missing data mechanism may be violated in
                 practice, because the missing data mechanism may depend
                 directly on the missing outcome itself. Under
                 noncompliance and an outcome-dependent nonignorable
                 missing data mechanism, previous studies showed the
                 identifiability of complier average causal effect for
                 discrete outcomes. In this article, we study the
                 semiparametric identifiability and estimation of
                 complier average causal effect in randomized clinical
                 trials with both all-or-none noncompliance and
                 outcome-dependent nonignorable missing continuous
                 outcomes, and propose a two-step maximum likelihood
                 estimator in order to eliminate the infinite
                 dimensional nuisance parameter. Our method does not
                 need to specify a parametric form for the missing data
                 mechanism. We also evaluate the finite sample property
                 of our method via extensive simulation studies and
                 sensitivity analysis, with an application to a
                 double-blinded psychiatric clinical trial.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "19",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Luo:2016:BDI,
  author =       "Peng Luo and Zhi Geng",
  title =        "Bounds on Direct and Indirect Effects of Treatment on
                 a Continuous Endpoint",
  journal =      j-TIST,
  volume =       "7",
  number =       "2",
  pages =        "20:1--20:??",
  month =        jan,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2668134",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Direct effect of a treatment variable on an endpoint
                 variable and indirect effect through a mediate variable
                 are important concepts for understanding a causal
                 mechanism. However, the randomized assignment of
                 treatment is not sufficient for identifying the direct
                 and indirect effects, and extra assumptions and
                 conditions are required, such as the sequential
                 ignorability assumption without unobserved confounders
                 or the sequential potential ignorability assumption.
                 But these assumptions may not be credible in many
                 applications. In this article, we consider the bounds
                 on controlled direct effect, natural direct effect, and
                 natural indirect effect without these extra
                 assumptions. Cai et al. [2008] presented the bounds for
                 the case of a binary endpoint, and we extend their
                 results to the general case for an arbitrary
                 endpoint.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "20",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Liu:2016:CDD,
  author =       "Furui Liu and Laiwan Chan",
  title =        "Causal Discovery on Discrete Data with Extensions to
                 Mixture Model",
  journal =      j-TIST,
  volume =       "7",
  number =       "2",
  pages =        "21:1--21:??",
  month =        jan,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2700477",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In this article, we deal with the causal discovery
                 problem on discrete data. First, we present a causal
                 discovery method for traditional additive noise models
                 that identifies the causal direction by analyzing the
                 supports of the conditional distributions. Then, we
                 present a causal mixture model to address the problem
                 that the function transforming cause to effect varies
                 across the observations. We propose a novel method
                 called Support Analysis (SA) for causal discovery with
                 the mixture model. Experiments using synthetic and real
                 data are presented to demonstrate the performance of
                 our proposed algorithm.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "21",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Flaxman:2016:GPI,
  author =       "Seth R. Flaxman and Daniel B. Neill and Alexander J.
                 Smola",
  title =        "{Gaussian} Processes for Independence Tests with
                 Non-iid Data in Causal Inference",
  journal =      j-TIST,
  volume =       "7",
  number =       "2",
  pages =        "22:1--22:??",
  month =        jan,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2806892",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "In applied fields, practitioners hoping to apply
                 causal structure learning or causal orientation
                 algorithms face an important question: which
                 independence test is appropriate for my data? In the
                 case of real-valued iid data, linear dependencies, and
                 Gaussian error terms, partial correlation is
                 sufficient. But once any of these assumptions is
                 modified, the situation becomes more complex.
                 Kernel-based tests of independence have gained
                 popularity to deal with nonlinear dependencies in
                 recent years, but testing for conditional independence
                 remains a challenging problem. We highlight the
                 important issue of non-iid observations: when data are
                 observed in space, time, or on a network, ``nearby''
                 observations are likely to be similar. This fact biases
                 estimates of dependence between variables. Inspired by
                 the success of Gaussian process regression for handling
                 non-iid observations in a wide variety of areas and by
                 the usefulness of the Hilbert--Schmidt Independence
                 Criterion (HSIC), a kernel-based independence test, we
                 propose a simple framework to address all of these
                 issues: first, use Gaussian process regression to
                 control for certain variables and to obtain residuals.
                 Second, use HSIC to test for independence. We
                 illustrate this on two classic datasets, one spatial,
                 the other temporal, that are usually treated as iid. We
                 show how properly accounting for spatial and temporal
                 variation can lead to more reasonable causal graphs. We
                 also show how highly structured data, like images and
                 text, can be used in a causal inference framework using
                 a novel structured input/output Gaussian process
                 formulation. We demonstrate this idea on a dataset of
                 translated sentences, trying to predict the source
                 language.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "22",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Fire:2016:LPC,
  author =       "Amy Fire and Song-Chun Zhu",
  title =        "Learning Perceptual Causality from Video",
  journal =      j-TIST,
  volume =       "7",
  number =       "2",
  pages =        "23:1--23:??",
  month =        jan,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2809782",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Perceptual causality is the perception of causal
                 relationships from observation. Humans, even as
                 infants, form such models from observation of the world
                 around them [Saxe and Carey 2006]. For a deeper
                 understanding, the computer must make similar models
                 through the analogous form of observation: video. In
                 this article, we provide a framework for the
                 unsupervised learning of this perceptual causal
                 structure from video. Our method takes action and
                 object status detections as input and uses heuristics
                 suggested by cognitive science research to produce the
                 causal links perceived between them. We greedily modify
                 an initial distribution featuring independence between
                 potential causes and effects by adding dependencies
                 that maximize information gain. We compile the learned
                 causal relationships into a Causal And-Or Graph, a
                 probabilistic and-or representation of causality that
                 adds a prior to causality. Validated against human
                 perception, experiments show that our method correctly
                 learns causal relations, attributing status changes of
                 objects to causing actions amid irrelevant actions. Our
                 method outperforms Hellinger's $ \chi^2$-statistic by
                 considering hierarchical action selection, and
                 outperforms the treatment effect by discounting
                 coincidental relationships.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "23",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Demeshko:2016:NCS,
  author =       "Marina Demeshko and Takashi Washio and Yoshinobu
                 Kawahara and Yuriy Pepyolyshev",
  title =        "A Novel Continuous and Structural {VAR} Modeling
                 Approach and Its Application to Reactor Noise
                 Analysis",
  journal =      j-TIST,
  volume =       "7",
  number =       "2",
  pages =        "24:1--24:??",
  month =        jan,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2710025",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "A vector autoregressive model in discrete time domain
                 (DVAR) is often used to analyze continuous time,
                 multivariate, linear Markov systems through their
                 observed time series data sampled at discrete
                 timesteps. Based on previous studies, the DVAR model is
                 supposed to be a noncanonical representation of the
                 system, that is, it does not correspond to a unique
                 system bijectively. However, in this article, we
                 characterize the relations of the DVAR model with its
                 corresponding Structural Vector AR (SVAR) and
                 Continuous Time Vector AR (CTVAR) models through a
                 finite difference method across continuous and discrete
                 time domain. We further clarify that the DVAR model of
                 a continuous time, multivariate, linear Markov system
                 is canonical under a highly generic condition. Our
                 analysis shows that we can uniquely reproduce its SVAR
                 and CTVAR models from the DVAR model. Based on these
                 results, we propose a novel Continuous and Structural
                 Vector Autoregressive (CSVAR) modeling approach to
                 derive the SVAR and the CTVAR models from their DVAR
                 model empirically derived from the observed time series
                 of continuous time linear Markov systems. We
                 demonstrate its superior performance through some
                 numerical experiments on both artificial and real-world
                 data.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "24",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Hours:2016:CAS,
  author =       "Hadrien Hours and Ernst Biersack and Patrick Loiseau",
  title =        "A Causal Approach to the Study of {TCP} Performance",
  journal =      j-TIST,
  volume =       "7",
  number =       "2",
  pages =        "25:1--25:??",
  month =        jan,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2770878",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jan 25 06:10:36 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Communication networks are complex systems whose
                 operation relies on a large number of components that
                 work together to provide services to end users. As the
                 quality of these services depends on different
                 parameters, understanding how each of them impacts the
                 final performance of a service is a challenging but
                 important problem. However, intervening on individual
                 factors to evaluate the impact of the different
                 parameters is often impractical due to the high cost of
                 intervention in a network. It is, therefore, desirable
                 to adopt a formal approach to understand the role of
                 the different parameters and to predict how a change in
                 any of these parameters will impact performance. The
                 approach of causality pioneered by J. Pearl provides a
                 powerful framework to investigate these questions. Most
                 of the existing theory is non-parametric and does not
                 make any assumption on the nature of the system under
                 study. However, most of the implementations of causal
                 model inference algorithms and most of the examples of
                 usage of a causal model to predict intervention rely on
                  assumptions such as linearity, normality, or discrete
                 data. In this article, we present a methodology to
                 overcome the challenges of working with real-world data
                 and extend the application of causality to complex
                 systems in the area of telecommunication networks, for
                 which assumptions of normality, linearity and discrete
                  data do not hold. Specifically, we study the performance
                 of TCP, which is the prevalent protocol for reliable
                 end-to-end transfer in the Internet. Analytical models
                 of the performance of TCP exist, but they take into
                 account the state of network only and disregard the
                 impact of the application at the sender and the
                 receiver, which often influences TCP performance. To
                 address this point, we take as application the file
                 transfer protocol (FTP), which uses TCP for reliable
                 transfer. Studying a well-understood protocol such as
                 TCP allows us to validate our approach and compare its
                 results to previous studies. We first present and
                 evaluate our methodology using TCP traffic obtained via
                 network emulation, which allows us to experimentally
                 validate the prediction of an intervention. We then
                 apply the methodology to real-world TCP traffic sent
                 over the Internet. Throughout the article, we compare
                 the causal approach for studying TCP performance to
                 other approaches such as analytical modeling or
                  simulation and show how they can complement each
                 other.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "25",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Belem:2016:BRE,
  author =       "Fabiano M. Bel{\'e}m and Carolina S. Batista and
                 Rodrygo L. T. Santos and Jussara M. Almeida and Marcos
                 A. Gon{\c{c}}alves",
  title =        "Beyond Relevance: Explicitly Promoting Novelty and
                 Diversity in Tag Recommendation",
  journal =      j-TIST,
  volume =       "7",
  number =       "3",
  pages =        "26:1--26:??",
  month =        apr,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2801130",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jun 20 11:24:25 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "The design and evaluation of tag recommendation
                 methods has historically focused on maximizing the
                 relevance of the suggested tags for a given object,
                 such as a movie or a song. However, relevance by itself
                 may not be enough to guarantee recommendation
                 usefulness. Promoting novelty and diversity in tag
                 recommendation not only increases the chances that the
                 user will select ``some'' of the recommended tags but
                 also promotes complementary information (i.e., tags),
                 which helps to cover multiple aspects or topics related
                 to the target object. Previous work has addressed the
                 tag recommendation problem by exploiting at most two of
                 the following aspects: (1) relevance, (2) explicit
                 topic diversity, and (3) novelty. In contrast, here we
                 tackle these three aspects conjointly, by introducing
                 two new tag recommendation methods that cover all three
                 aspects of the problem at different levels. Our first
                 method, called Random Forest with topic-related
                 attributes, or RF$_t$, extends a relevance-driven tag
                 recommender based on the Random Forest ( RF )
                 learning-to-rank method by including new tag attributes
                 to capture the extent to which a candidate tag is
                 related to the topics of the target object. This
                 solution captures topic diversity as well as novelty at
                 the attribute level while aiming at maximizing
                 relevance in its objective function. Our second method,
                 called Explicit Tag Recommendation Diversifier with
                 Novelty Promotion, or xTReND, reranks the
                 recommendations provided by any tag recommender to
                 jointly promote relevance, novelty, and topic
                 diversity. We use RF$_t$ as a basic recommender applied
                 before the reranking, thus building a solution that
                 addresses the problem at both attribute and objective
                 levels. Furthermore, to enable the use of our solutions
                 on applications in which category information is
                 unavailable, we investigate the suitability of using
                 latent Dirichlet allocation (LDA) to automatically
                 generate topics for objects. We evaluate all tag
                 recommendation approaches using real data from five
                 popular Web 2.0 applications. Our results show that
                 RF$_t$ greatly outperforms the relevance-driven RF
                 baseline in diversity while producing gains in
                 relevance as well. We also find that our new xTReND
                 reranker obtains considerable gains in both novelty and
                 relevance when compared to that same baseline while
                 keeping the same relevance levels. Furthermore,
                 compared to our previous reranker method, xTReD, which
                 does not consider novelty, xTReND is also quite
                 effective, improving the novelty of the recommended
                 tags while keeping similar relevance and diversity
                 levels in most datasets and scenarios. Comparing our
                 two new proposals, we find that xTReND considerably
                 outperforms RF$_t$ in terms of novelty and diversity
                 with only small losses (under 4\%) in relevance.
                 Overall, considering the trade-off among relevance,
                 novelty, and diversity, our results demonstrate the
                 superiority of xTReND over the baselines and the
                 proposed alternative, RF$_t$. Finally, the use of
                 automatically generated latent topics as an alternative
                 to manually labeled categories also provides
                 significant improvements, which greatly enhances the
                 applicability of our solutions to applications where
                 the latter is not available.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "26",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Paik:2016:PDM,
  author =       "Jiaul H. Paik",
  title =        "Parameterized Decay Model for Information Retrieval",
  journal =      j-TIST,
  volume =       "7",
  number =       "3",
  pages =        "27:1--27:??",
  month =        apr,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2800794",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jun 20 11:24:25 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "This article proposes a term weighting scheme for
                 measuring query-document similarity that attempts to
                 explicitly model the dependency between separate
                 occurrences of a term in a document. The assumption is
                 that, if a term appears once in a document, it is more
                 likely to appear again in the same document. Thus, as
                 the term appears again and again, the information
                 content of the subsequent occurrences decreases
                 gradually, since they are more predictable. We
                 introduce a parameterized decay function to model this
                 assumption, where the initial contribution of the term
                 can be determined using any reasonable term
                 discrimination factor. The effectiveness of the
                 proposed model is evaluated on a number of recent web
                 test collections of varying nature. The experimental
                 results show that the proposed model significantly
                 outperforms a number of well known retrieval models
                 including a recently proposed strong Term Frequency and
                 Inverse Document Frequency (TF-IDF) model.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "27",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Li:2016:MCA,
  author =       "Zhifeng Li and Dihong Gong and Qiang Li and Dacheng
                 Tao and Xuelong Li",
  title =        "Mutual Component Analysis for Heterogeneous Face
                 Recognition",
  journal =      j-TIST,
  volume =       "7",
  number =       "3",
  pages =        "28:1--28:??",
  month =        apr,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2807705",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jun 20 11:24:25 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Heterogeneous face recognition, also known as
                 cross-modality face recognition or intermodality face
                 recognition, refers to matching two face images from
                 alternative image modalities. Since face images from
                 different image modalities of the same person are
                 associated with the same face object, there should be
                 mutual components that reflect those intrinsic face
                 characteristics that are invariant to the image
                 modalities. Motivated by this rationality, we propose a
                 novel approach called Mutual Component Analysis (MCA)
                 to infer the mutual components for robust heterogeneous
                 face recognition. In the MCA approach, a generative
                 model is first proposed to model the process of
                 generating face images in different modalities, and
                 then an Expectation Maximization (EM) algorithm is
                 designed to iteratively learn the model parameters. The
                 learned generative model is able to infer the mutual
                 components (which we call the hidden factor, where
                 hidden means the factor is unreachable and invisible,
                 and can only be inferred from observations) that are
                 associated with the person's identity, thus enabling
                 fast and effective matching for cross-modality face
                 recognition. To enhance recognition performance, we
                 propose an MCA-based multiclassifier framework using
                 multiple local features. Experimental results show that
                 our new approach significantly outperforms the
                 state-of-the-art results on two typical application
                 scenarios: sketch-to-photo and infrared-to-visible face
                 recognition.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "28",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Ye:2016:GIL,
  author =       "Jintao Ye and Zhao Yan Ming and Tat Seng Chua",
  title =        "Generating Incremental Length Summary Based on
                 Hierarchical Topic Coverage Maximization",
  journal =      j-TIST,
  volume =       "7",
  number =       "3",
  pages =        "29:1--29:??",
  month =        apr,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2809433",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jun 20 11:24:25 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Document summarization is playing an important role in
                 coping with information overload on the Web. Many
                 summarization models have been proposed recently, but
                 few try to adjust the summary length and sentence order
                 according to application scenarios. With the popularity
                 of handheld devices, presenting key information first
                 in summaries of flexible length is of great convenience
                 in terms of faster reading and decision-making and
                 network consumption reduction. Targeting this problem,
                 we introduce a novel task of generating summaries of
                 incremental length. In particular, we require that the
                 summaries should have the ability to automatically
                 adjust the coverage of general-detailed information
                 when the summary length varies. We propose a novel
                 summarization model that incrementally maximizes topic
                 coverage based on the document's hierarchical topic
                 model. In addition to the standard Rouge-1 measure, we
                 define a new evaluation metric based on the similarity
                 of the summaries' topic coverage distribution in order
                 to account for sentence order and summary length.
                 Extensive experiments on Wikipedia pages, DUC 2007, and
                 general noninverted writing style documents from
                 multiple sources show the effectiveness of our proposed
                 approach. Moreover, we carry out a user study on a
                 mobile application scenario to show the usability of
                 the produced summary in terms of improving judgment
                 accuracy and speed, as well as reducing the reading
                 burden and network traffic.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "29",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@Article{Yang:2016:PCM,
  author =       "Dingqi Yang and Daqing Zhang and Bingqing Qu",
  title =        "Participatory Cultural Mapping Based on Collective
                 Behavior Data in Location-Based Social Networks",
  journal =      j-TIST,
  volume =       "7",
  number =       "3",
  pages =        "30:1--30:??",
  month =        apr,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2814575",
  ISSN =         "2157-6904 (print), 2157-6912 (electronic)",
  ISSN-L =       "2157-6904",
  bibdate =      "Mon Jun 20 11:24:25 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/tist.bib",
  abstract =     "Culture has been recognized as a driving impetus for
                 human development. It co-evolves with both human belief
                 and behavior. When studying culture, Cultural Mapping
                 is a crucial tool to visualize different aspects of
                 culture (e.g., religions and languages) from the
                 perspectives of indigenous and local people. Existing
                 cultural mapping approaches usually rely on large-scale
                 survey data with respect to human beliefs, such as
                 moral values. However, such a data collection method
                 not only incurs a significant cost of both human
                 resources and time, but also fails to capture human
                 behavior, which massively reflects cultural
                 information. In addition, it is practically difficult
                 to collect large-scale human behavior data.
                 Fortunately, with the recent boom in Location-Based
                 Social Networks (LBSNs), a considerable number of users
                 report their activities in LBSNs in a participatory
                 manner, which provides us with an unprecedented
                 opportunity to study large-scale user behavioral data.
                 In this article, we propose a participatory cultural
                 mapping approach based on collective behavior in LBSNs.
                 First, we collect the participatory sensed user
                 behavioral data from LBSNs. Second, since only local
                 users are eligible for cultural mapping, we propose a
                 progressive ``home'' location identification method to
                 filter out ineligible users. Third, by extracting three
                 key cultural features from daily activity, mobility,
                 and linguistic perspectives, respectively, we propose a
                 cultural clustering method to discover cultural
                 clusters. Finally, we visualize the cultural clusters
                 on the world map. Based on a real-world LBSN dataset,
                 we experimentally validate our approach by conducting
                 both qualitative and quantitative analysis on the
                 generated cultural maps. The results show that our
                 approach can subtly capture cultural features and
                 generate representative cultural maps that correspond
                 well with traditional cultural maps based on survey
                 data.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Intell. Syst. Technol.",
  articleno =    "30",
  fjournal =     "ACM Transactions on Intelligent Systems and Technology
                 (TIST)",
  journal-URL =  "http://portal.acm.org/citation.cfm?id=J1318",
}

@article{Jia:2016:LPT,
  author =       {Yantao Jia and Yuanzhuo Wang and Xiaolong Jin and
                  Xueqi Cheng},
  title =        {Location Prediction: a Temporal-Spatial {Bayesian}
                  Model},
  journal =      j-TIST,
  volume =       {7},
  number =       {3},
  pages =        {31:1--31:??},
  month =        apr,
  year =         {2016},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/2816824},
  ISSN =         {2157-6904 (print), 2157-6912 (electronic)},
  ISSN-L =       {2157-6904},
  bibdate =      {Mon Jun 20 11:24:25 MDT 2016},
  bibsource =    {http://portal.acm.org/;
                  https://www.math.utah.edu/pub/tex/bib/tist.bib},
  abstract =     {In social networks, predicting a user's location
                  mainly depends on those of his/her friends, where the
                  key lies in how to select his/her most influential
                  friends. In this article, we analyze the theoretically
                  maximal accuracy of location prediction based on
                  friends' locations and compare it with the practical
                  accuracy obtained by the state-of-the-art location
                  prediction methods. Upon observing a big gap between
                  the theoretical and practical accuracy, we propose a
                  new strategy for selecting influential friends in order
                  to improve the practical location prediction accuracy.
                  Specifically, several features are defined to measure
                  the influence of the friends on a user's location,
                  based on which we put forth a sequential
                  random-walk-with-restart procedure to rank the friends
                  of the user in terms of their influence. By dynamically
                  selecting the top N most influential friends of the
                  user per time slice, we develop a temporal-spatial
                  Bayesian model to characterize the dynamics of friends'
                  influence for location prediction. Finally, extensive
                  experimental results on datasets of real social
                  networks demonstrate that the proposed influential
                  friend selection method and temporal-spatial Bayesian
                  model can significantly improve the accuracy of
                  location prediction.},
  acknowledgement = ack-nhfb,
  ajournal =     {ACM Trans. Intell. Syst. Technol.},
  articleno =    {31},
  fjournal =     {ACM Transactions on Intelligent Systems and Technology
                  (TIST)},
  journal-URL =  {http://portal.acm.org/citation.cfm?id=J1318},
}

@article{Li:2016:VFE,
  author =       {Xiaoyan Li and Tongliang Liu and Jiankang Deng and
                  Dacheng Tao},
  title =        {Video Face Editing Using Temporal-Spatial-Smooth
                  Warping},
  journal =      j-TIST,
  volume =       {7},
  number =       {3},
  pages =        {32:1--32:??},
  month =        apr,
  year =         {2016},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/2819000},
  ISSN =         {2157-6904 (print), 2157-6912 (electronic)},
  ISSN-L =       {2157-6904},
  bibdate =      {Mon Jun 20 11:24:25 MDT 2016},
  bibsource =    {http://portal.acm.org/;
                  https://www.math.utah.edu/pub/tex/bib/tist.bib},
  abstract =     {Editing faces in videos is a popular yet challenging
                  task in computer vision and graphics that encompasses
                  various applications, including facial attractiveness
                  enhancement, makeup transfer, face replacement, and
                  expression manipulation. Directly applying the existing
                  warping methods to video face editing has the major
                  problem of temporal incoherence in the synthesized
                  videos, which cannot be addressed by simply employing
                  face tracking techniques or manual interventions, as it
                  is difficult to eliminate the subtly temporal
                  incoherence of the facial feature point localizations
                  in a video sequence. In this article, we propose a
                  temporal-spatial-smooth warping (TSSW) method to
                  achieve a high temporal coherence for video face
                  editing. TSSW is based on two observations: (1) the
                  control lattices are critical for generating warping
                  surfaces and achieving the temporal coherence between
                  consecutive video frames, and (2) the temporal
                  coherence and spatial smoothness of the control
                  lattices can be simultaneously and effectively
                  preserved. Based upon these observations, we impose the
                  temporal coherence constraint on the control lattices
                  on two consecutive frames, as well as the spatial
                  smoothness constraint on the control lattice on the
                  current frame. TSSW calculates the control lattice (in
                  either the horizontal or vertical direction) by
                  updating the control lattice (in the corresponding
                  direction) on its preceding frame, i.e., minimizing a
                  novel energy function that unifies a data-driven term,
                  a smoothness term, and feature point constraints. The
                  contributions of this article are twofold: (1) we
                  develop TSSW, which is robust to the subtly temporal
                  incoherence of the facial feature point localizations
                  and is effective to preserve the temporal coherence and
                  spatial smoothness of the control lattices for editing
                  faces in videos, and (2) we present a new unified video
                  face editing framework that is capable for improving
                  the performances of facial attractiveness enhancement,
                  makeup transfer, face replacement, and expression
                  manipulation.},
  acknowledgement = ack-nhfb,
  ajournal =     {ACM Trans. Intell. Syst. Technol.},
  articleno =    {32},
  fjournal =     {ACM Transactions on Intelligent Systems and Technology
                  (TIST)},
  journal-URL =  {http://portal.acm.org/citation.cfm?id=J1318},
}

@article{Li:2016:MNS,
  author =       {Zechao Li and Jinhui Tang and Xueming Wang and Jing
                  Liu and Hanqing Lu},
  title =        {Multimedia News Summarization in Search},
  journal =      j-TIST,
  volume =       {7},
  number =       {3},
  pages =        {33:1--33:??},
  month =        apr,
  year =         {2016},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/2822907},
  ISSN =         {2157-6904 (print), 2157-6912 (electronic)},
  ISSN-L =       {2157-6904},
  bibdate =      {Mon Jun 20 11:24:25 MDT 2016},
  bibsource =    {http://portal.acm.org/;
                  https://www.math.utah.edu/pub/tex/bib/tist.bib},
  abstract =     {It is a necessary but challenging task to relieve
                  users from the proliferative news information and allow
                  them to quickly and comprehensively master the
                  information of the whats and hows that are happening in
                  the world every day. In this article, we develop a
                  novel approach of multimedia news summarization for
                  searching results on the Internet, which uncovers the
                  underlying topics among query-related news information
                  and threads the news events within each topic to
                  generate a query-related brief overview. First, the
                  hierarchical latent Dirichlet allocation (hLDA) model
                  is introduced to discover the hierarchical topic
                  structure from query-related news documents, and a new
                  approach based on the weighted aggregation and max
                  pooling is proposed to identify one representative news
                  article for each topic. One representative image is
                  also selected to visualize each topic as a complement
                  to the text information. Given the representative
                  documents selected for each topic, a time-bias maximum
                  spanning tree (MST) algorithm is proposed to thread
                  them into a coherent and compact summary of their
                  parent topic. Finally, we design a friendly interface
                  to present users with the hierarchical summarization of
                  their required news information. Extensive experiments
                  conducted on a large-scale news dataset collected from
                  multiple news Web sites demonstrate the encouraging
                  performance of the proposed solution for news
                  summarization in news retrieval.},
  acknowledgement = ack-nhfb,
  ajournal =     {ACM Trans. Intell. Syst. Technol.},
  articleno =    {33},
  fjournal =     {ACM Transactions on Intelligent Systems and Technology
                  (TIST)},
  journal-URL =  {http://portal.acm.org/citation.cfm?id=J1318},
}

@Article{Hardegger:2016:SUB,
  author =       "Michael Hardegger and Daniel Roggen and Alberto
                 Calatroni and Gerhard Tr{\"o}ster",
  title =        "{S-SMART}: a Unified {Bayesian} Framework for
                 Simultaneous Semantic Mapping, Activity Recognition,
                 and Tracking",
  journal =      j-TIST,
  volume =       "7",
  number =       "3",
  pages =        "34:1--34:??",
  month =