Context. Defects such as ambiguity and incompleteness are pervasive in software requirements, often due to the limited time that practitioners devote to writing good requirements. Objective. We study whether a synergy between humans’ analytic capabilities and natural language processing is an effective approach for quickly identifying near-synonyms, a possible source of terminological ambiguity. Method. We propose a tool-supported approach that blends information visualization with two natural language processing techniques: conceptual model extraction and semantic similarity. We evaluate the precision and recall of our approach compared to a pen-and-paper manual inspection session through a controlled quasi-experiment that involves 57 participants organized into 28 groups, each group working on one real-world requirements data set. Results. The experimental results indicate that manual inspection delivers higher recall (statistically significant with p ≤ 0.01) and non-significantly higher precision. Based on qualitative observations, we analyze the quantitative results and suggest interpretations that explain the advantages and disadvantages of each approach. Conclusions. Our experiment confirms conventional wisdom in requirements engineering: identifying terminological ambiguities is time consuming, even when with tool support; and it is hard to determine whether a near-synonym may challenge the correct development of a software system. The results suggest that the most effective approach may be a combination of manual inspection with an improved version of our tool.
%0 Journal Article
%1 journals/infsof/DalpiazSBAL19
%A Dalpiaz, Fabiano
%A van der Schalk, Ivor
%A Brinkkemper, Sjaak
%A Aydemir, Fatma Basak
%A Lucassen, Garm
%D 2019
%I Elsevier BV
%J Information & Software Technology
%K development orchestration requirements
%P 3-16
%R 10.1016/j.infsof.2018.12.007
%T Detecting terminological ambiguity in user stories: Tool and experimentation.
%U http://www.staff.science.uu.nl/~dalpi001/papers/dalp-scha-brin-ayde-luca-19-ist.pdf
%V 110
%X Context. Defects such as ambiguity and incompleteness are pervasive in software requirements, often due to the limited time that practitioners devote to writing good requirements. Objective. We study whether a synergy between humans’ analytic capabilities and natural language processing is an effective approach for quickly identifying near-synonyms, a possible source of terminological ambiguity. Method. We propose a tool-supported approach that blends information visualization with two natural language processing techniques: conceptual model extraction and semantic similarity. We evaluate the precision and recall of our approach compared to a pen-and-paper manual inspection session through a controlled quasi-experiment that involves 57 participants organized into 28 groups, each group working on one real-world requirements data set. Results. The experimental results indicate that manual inspection delivers higher recall (statistically significant with p ≤ 0.01) and non-significantly higher precision. Based on qualitative observations, we analyze the quantitative results and suggest interpretations that explain the advantages and disadvantages of each approach. Conclusions. Our experiment confirms conventional wisdom in requirements engineering: identifying terminological ambiguities is time consuming, even when with tool support; and it is hard to determine whether a near-synonym may challenge the correct development of a software system. The results suggest that the most effective approach may be a combination of manual inspection with an improved version of our tool.
@article{journals/infsof/DalpiazSBAL19,
  abstract  = {Context. Defects such as ambiguity and incompleteness are pervasive in software requirements, often due to the limited time that practitioners devote to writing good requirements. Objective. We study whether a synergy between humans' analytic capabilities and natural language processing is an effective approach for quickly identifying near-synonyms, a possible source of terminological ambiguity. Method. We propose a tool-supported approach that blends information visualization with two natural language processing techniques: conceptual model extraction and semantic similarity. We evaluate the precision and recall of our approach compared to a pen-and-paper manual inspection session through a controlled quasi-experiment that involves 57 participants organized into 28 groups, each group working on one real-world requirements data set. Results. The experimental results indicate that manual inspection delivers higher recall (statistically significant with p {$\leq$} 0.01) and non-significantly higher precision. Based on qualitative observations, we analyze the quantitative results and suggest interpretations that explain the advantages and disadvantages of each approach. Conclusions. Our experiment confirms conventional wisdom in requirements engineering: identifying terminological ambiguities is time consuming, even when with tool support; and it is hard to determine whether a near-synonym may challenge the correct development of a software system. The results suggest that the most effective approach may be a combination of manual inspection with an improved version of our tool.},
  added-at  = {2019-09-26T13:50:13.000+0200},
  author    = {Dalpiaz, Fabiano and van der Schalk, Ivor and Brinkkemper, Sjaak and Aydemir, Fatma Basak and Lucassen, Garm},
  biburl    = {https://www.bibsonomy.org/bibtex/21d915f75c9b3d0e7e2ba821633a09899/ispma},
  doi       = {10.1016/j.infsof.2018.12.007},
  interhash = {7a0fdae2b1f7157d1c1e8291aa8322f2},
  intrahash = {1d915f75c9b3d0e7e2ba821633a09899},
  journal   = {Information \& Software Technology},
  keywords  = {development orchestration requirements},
  month     = jun,
  pages     = {3--16},
  publisher = {Elsevier {BV}},
  timestamp = {2019-09-26T13:52:21.000+0200},
  title     = {Detecting terminological ambiguity in user stories: Tool and experimentation},
  url       = {http://www.staff.science.uu.nl/~dalpi001/papers/dalp-scha-brin-ayde-luca-19-ist.pdf},
  volume    = {110},
  year      = {2019}
}