2023
Nembhard, Fitzroy D; Slhoub, Khaled A; Carvalho, Marco M
An Agent-Based Approach Toward Smart Software Testing Conference
Proceedings of the Future Technologies Conference, vol. 814, Lecture Notes in Networks and Systems (LNNS), Springer Nature Switzerland, 2023, ISBN: 978-3-031-47451-4.
Abstract | Links | BibTeX | Tags: AOSE, NLP, software testing, virtual assistant
@inproceedings{nokey,
  title     = {An Agent-Based Approach Toward Smart Software Testing},
  author    = {Nembhard, Fitzroy D. and Slhoub, Khaled A. and Carvalho, Marco M.},
  editor    = {Arai, K.},
  doi       = {10.1007/978-3-031-47451-4_21},
  isbn      = {978-3-031-47451-4},
  year      = {2023},
  date      = {2023-11-01},
  urldate   = {2023-11-01},
  booktitle = {Proceedings of the Future Technologies Conference},
  volume    = {814},
  publisher = {Springer Nature Switzerland},
  series    = {Lecture Notes in Networks and Systems},
  abstract  = {As the field of software testing continues to advance, cooperative software testing and analysis has been proposed as a new methodology to help combat the challenges involved with performing highly effective software testing. Motivated by the understanding that auto-testing systems on their own are not powerful enough to address complications in testing complex real-world software, this model involves human-machine and machine-machine cooperation to make automated software testing processes more interactive and user-friendly. It is with this in mind that we propose an agent-based approach to software testing that involves teaming humans with virtual assistants on smart devices to help coordinate the tasks associated with testing complex real-world software. Currently, virtual assistants are widely used for interpersonal tasks such as purchasing items from restaurants, interfacing with mobile applications to create events and reminders, and composing and sending messages on behalf of the user. In this research, we create an agent-based framework and use it to demonstrate that a virtual assistant on a smart device can also be utilized to work closely with software testers to efficiently and effectively verify software functionality, generate reports, and communicate results with developers. We utilize unit testing to evaluate our proposed methodology by applying it to a set of Java projects. Our results show that virtual agents can be used to work with humans to coordinate tasks associated with unit-testing software.},
  keywords  = {AOSE, NLP, software testing, virtual assistant},
  pubstate  = {published},
  tppubtype = {conference}
}
2019
Slhoub, Khaled; Carvalho, Marco; Nembhard, Fitzroy
Evaluation and Comparison of Agent-Oriented Methodologies: A Software Engineering Viewpoint Proceedings Article
In: 2019 IEEE International Systems Conference (SysCon), pp. 1-8, 2019.
Abstract | Links | BibTeX | Tags: agent, AOSE, MaSE, PASSI, Prometheus, software engineering, software quality, software requirements, standards, SWEBOK
@inproceedings{AOSEEvaluation,
  title     = {Evaluation and Comparison of Agent-Oriented Methodologies: A Software Engineering Viewpoint},
  author    = {Slhoub, Khaled and Carvalho, Marco and Nembhard, Fitzroy},
  doi       = {10.1109/SYSCON.2019.8836962},
  year      = {2019},
  date      = {2019-04-08},
  urldate   = {2019-04-08},
  booktitle = {2019 IEEE International Systems Conference (SysCon)},
  pages     = {1--8},
  abstract  = {Numerous agent-oriented methodologies that offer a rich pool of resources to support developers of agent-based systems have been proposed. However, the use of existing methodologies in industrial settings is still limited due to the large volume of methodologies, diversity of covered scopes, ambiguity in concepts, and lack of maturity. This makes it difficult for agent technology practitioners to choose the appropriate methodology that best fits their given development context. To eliminate such agent-based development bottleneck, it is important to introduce suitable methods for evaluating, comparing, and classifying agent-oriented methodologies in order to leverage their usage among practitioners. Having systems to evaluate methodologies can effectively help developers better understand existing methodologies, realize their benefits, outline their pros and cons, and assist practitioners with selecting the best-fit methodology for a specific agent-based project. In response, this paper proposes a novel criteria-based evaluation that is influenced by software engineering practices to assess and compare agent-oriented methodologies. The proposed evaluation is derived from the software engineering body of knowledge (SWEBOK) and provides a simplified method to assess the coverage degree of an agent-oriented methodology with respect to major software knowledge areas such as the requirements and testing phases. We demonstrate the applicability of the proposed evaluation by applying it to three agent-oriented methodologies (PASSI, MaSE, and Prometheus) in the software engineering requirements and testing phases.},
  keywords  = {agent, AOSE, MaSE, PASSI, Prometheus, software engineering, software quality, software requirements, standards, SWEBOK},
  pubstate  = {published},
  tppubtype = {inproceedings}
}