@Article{Jayal:2009:PLA,
author = "Ambikesh Jayal and Martin Shepperd",
title = "The Problem of Labels in {E}-Assessment of Diagrams",
journal = j-JERIC,
volume = "8",
number = "4",
pages = "12:1--12:??",
month = jan,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1482348.1482351",
ISSN = "1531-4278",
bibdate = "Mon Feb 2 16:27:27 MST 2009",
bibsource = "http://www.acm.org/pubs/contents/journals/jeric/;
http://www.math.utah.edu/pub/tex/bib/jeric.bib",
abstract = "In this article we explore a problematic aspect of
automated assessment of diagrams. Diagrams have partial
and sometimes inconsistent semantics. Typically, much of
the meaning of a diagram resides in the labels;
however, the choice of labeling is largely
unrestricted. This means a correct solution may use
labels that differ from, yet are semantically
equivalent to, those in the specimen solution. With
human marking this problem can be easily overcome.
Unfortunately, with e-assessment it is challenging. We
empirically explore the scale of the synonym problem by
analyzing 160 student solutions to a UML task. From
this we find that the cumulative growth of synonyms
shows only a limited tendency to reduce at the margin,
despite using a range of text-processing algorithms
such as stemming and auto-correction of spelling
errors. This finding has significant implications for
the ease with which we may develop future e-assessment
systems for diagrams, in that the need for better
algorithms for assessing label semantic similarity
becomes inescapable.",
acknowledgement = ack-nhfb,
articleno = "12",
keywords = "diagrams; E-assessment",
}
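
A minimal sketch (not the authors' code) of the kind of label-normalization
pipeline the abstract describes: spelling auto-correction against a reference
vocabulary followed by stemming. It illustrates why such preprocessing collapses
spelling and inflection variants but leaves true synonyms distinct. The
vocabulary, sample labels, and the use of nltk's PorterStemmer plus
difflib for fuzzy matching are illustrative assumptions.

import difflib
from nltk.stem import PorterStemmer

stemmer = PorterStemmer()

# Hypothetical reference vocabulary drawn from a specimen solution.
VOCABULARY = ["customer", "order", "payment", "invoice", "product"]

def normalize_label(label, vocab=VOCABULARY):
    """Lower-case a label, auto-correct each token against vocab, then stem."""
    corrected = []
    for tok in label.lower().split():
        # Fuzzy match by edit similarity; keep the token if nothing is close.
        match = difflib.get_close_matches(tok, vocab, n=1, cutoff=0.8)
        corrected.append(match[0] if match else tok)
    return " ".join(stemmer.stem(tok) for tok in corrected)

# Misspelled and inflected labels map to the same normalized form,
# but a synonym such as "client" for "customer" does not, which is the
# residual problem the article highlights.
print(normalize_label("Custmer Orders"))  # prints "custom order"
print(normalize_label("Client Orders"))   # prints "client order"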