@inproceedings{55413be9435c4be8bdafb9e653cb700e,
title = "Revisiting the conclusion instability issue in software effort estimation",
abstract = "Conclusion instability is the absence of observing the same effect under varying experimental conditions. Deep Neural Network (DNN) and ElasticNet software effort estimation (SEE) models were applied to two SEE datasets with the view of resolving the conclusion instability issue and assessing the suitability of ElasticNet as a viable SEE benchmark model. Results were mixed as both model types attain conclusion stability for the Kitchenham dataset whilst conclusion instability existed in the Desharnais dataset. ElasticNet was outperformed by DNN and as such it is not recommended to be used as a SEE benchmark model.",
keywords = "Conclusion Instability, Deep Neural Network, ElasticNet, Prediction model, Software Effort Estimation",
author = "Bosu, {Michael Franklin} and Solomon Mensah and Kwabena Bennin and Diab Abuaiadah",
note = "Publisher Copyright: {\textcopyright} 2018 Universitat zu Koln. All rights reserved.; 30th International Conference on Software Engineering and Knowledge Engineering, SEKE 2018 ; Conference date: 01-07-2018 Through 03-07-2018",
year = "2018",
doi = "10.18293/SEKE2018-126",
language = "English",
series = "Proceedings of the International Conference on Software Engineering and Knowledge Engineering, SEKE",
publisher = "Knowledge Systems Institute Graduate School",
pages = "368--371",
booktitle = "Proceedings - SEKE 2018",
}