% SE4AI.bib
@misc{:17445,
title = {Software {{Engineering}} for {{AI-Enabled Systems}} ({{SE4AI}})},
urldate = {2022-12-13},
howpublished = {https://ckaestne.github.io/seai/S2020/},
file = {/Users/guru/Zotero/storage/JM2WJ96K/S2020.html}
}
@article{abdulqadder2020:multilayered,
title = {Multi-Layered Intrusion Detection and Prevention in the {{SDN}}/{{NFV}} Enabled Cloud of {{5G}} Networks Using {{AI-based}} Defense Mechanisms},
  author = {Abdulqadder, Ihsan H. and Zhou, Shijie and Zou, Deqing and Aziz, Israa T. and Akber, Syed Muhammad Abrar},
year = {2020},
month = oct,
journal = {Computer Networks},
volume = {179},
pages = {107364},
issn = {1389-1286},
doi = {10.1016/j.comnet.2020.107364},
urldate = {2023-01-15},
abstract = {Software defined networking (SDN), network function virtualization (NFV), and cloud computing are receiving significant attention in 5G networks. However, this attention creates a new challenge for security provisioning in these integrated technologies. Research in the field of SDN, NFV, cloud computing, and 5G has recently focused on the intrusion detection and prevention system (IDPS). Existing IDPS solutions are inadequate, which could cause large resource wastage and several security threats. To alleviate security issues, timely detection of an attacker is important. Thus, in this paper, we propose a novel approach that is referred to as multilayered intrusion detection and prevention (ML-IDP) in an SDN/NFV-enabled cloud of 5G networks. The proposed approach defends against security attacks using artificial intelligence (AI). In this paper, we employed five layers: data acquisition layer, switches layer, domain controllers (DC) layer, smart controller (SC) layer, and virtualization layer (NFV infrastructure). User authentication is held in the first layer using the Four-Q-Curve algorithm. To address the flow table overloading attack in the switches layer, the game theory approach, which is executed in the IDP agent, is proposed. The involvement of the IDP agent is to completely avoid a flow table overloading attack by a deep reinforcement learning algorithm, and thus, it updates the current state of all switches. In the DC layer, packets are processed and classified into two classes (normal and suspicious) by a Shannon Entropy function. Normal packets are forwarded to the cloud via the SC. Suspicious packets are sent to the VNF using a growing multiple self-organization map (GM-SOM). The proposed ML-IDP system is evaluated using NS3.26 for different security attacks, including IP Spoofing, flow table overloading, DDoS, Control Plane Saturation, and host location hijacking. From the experiment results, we proved that the ML-IDP with AI-based defense mechanisms effectively detects and prevents attacks.},
langid = {english},
  keywords = {Artificial intelligence,Intrusion detection and prevention,Multilayered architecture,SDN/NFV Cloud of 5G},
file = {/Users/guru/Zotero/storage/LAER2FGT/S1389128619310205.html}
}
@inproceedings{Abdulqadder2020499,
type = {Conference Paper},
title = {Bloc-Sec: {{Blockchain-based}} Lightweight Security Architecture for {{5G}}/{{B5G}} Enabled {{SDN}}/{{NFV}} Cloud of {{IoT}}},
author = {Abdulqadder, Ihsan H. and Zhou, Shijie and Zou, Deqing and Aziz, Israa T. and Akber, Syed Muhammad Abrar},
year = {2020},
series = {International {{Conference}} on {{Communication Technology Proceedings}}, {{ICCT}}},
volume = {2020-October},
pages = {499--507},
doi = {10.1109/ICCT50939.2020.9295823},
  abstract = {Lightweight security provisioning is a recent topic in resource-constrained software-defined networking (SDN). Integration between SDN, internet of things (IoT), network function virtualization (NFV) provides massive application services for 5G/B5G communications. Current state-of-the-art security solutions suffer from high resource consumption, weak key distribution, and forged credentials. In this paper, we propose a blockchain-based lightweight security architecture (Bloc-Sec) for the SDN/NFV-enabled cloud of IoT networks with 5G/B5G communication. Firstly, we authenticate all IoT devices to the blockchain server using a multi-factor Blake-256 hashing algorithm. Secondly, we select the optimal virtual network function (VNF) using the cuttlefish optimization algorithm. Thirdly, blockchain is invoked to store the hashed flow rules that are deployed in VNF. Fourthly, the controller is involved in the packet classification by proposing the packet header inspection and packet content inspection using spiking dual fuzzy neural networks. For testing, NS3.26 is implemented and the performance evaluated. \textcopyright{} 2020 IEEE.},
publication_stage = {Final},
source = {Scopus}
}
@inproceedings{abubakar2020interplay,
title = {Interplay of Machine Learning and Software Engineering for Quality Estimations},
booktitle = {2020 International Conference on Communications, Computing, Cybersecurity, and Informatics ({{CCCI}})},
  author = {Abubakar, Hamza and Obaidat, Mohammad S. and Gupta, Aaryan and Bhattacharya, Pronaya and Tanwar, Sudeep},
year = {2020},
pages = {1--6},
organization = {{IEEE}},
file = {/Users/guru/Zotero/storage/L5Z33RBF/Abubakar et al. - 2020 - Interplay of machine learning and software enginee.pdf}
}
@inproceedings{aggarwal2019user,
title = {User Privacy Risk Analysis for the {{Internet}} of {{Things}}},
booktitle = {2019 Sixth International Conference on Internet of Things: {{Systems}}, Management and Security ({{IOTSMS}})},
author = {Aggarwal, Akash and Asif, Waqar and Azam, Habibul and Markovic, Milan and Rajarajan, Muttukrishnan and Edwards, Peter},
year = {2019},
pages = {259--264},
organization = {{IEEE}}
}
@inproceedings{Agrawal2020,
type = {Conference Paper},
  title = {The Next Generation of Human-Drone Partnerships: {{Co-designing}} an Emergency Response System},
author = {Agrawal, Ankit and Abraham, Sophia J. and Burger, Benjamin and Christine, Chichi and Fraser, Luke and Hoeksema, John M. and Hwang, Sarah and Travnik, Elizabeth and Kumar, Shreya and Scheirer, Walter and {Cleland-Huang}, Jane and Vierhauser, Michael and Bauer, Ryan and Cox, Steve},
year = {2020},
series = {Conference on {{Human Factors}} in {{Computing Systems}} - {{Proceedings}}},
doi = {10.1145/3313831.3376825},
abstract = {The use of semi-autonomous Unmanned Aerial Vehicles (UAV) to support emergency response scenarios, such as fire surveillance and search and rescue, offers the potential for huge societal benefits. However, designing an effective solution in this complex domain represents a "wicked design" problem, requiring a careful balance between trade-offs associated with drone autonomy versus human control, mission functionality versus safety, and the diverse needs of different stakeholders. This paper focuses on designing for situational awareness (SA) using a scenario-driven, participatory design process. We developed SA cards describing six common design-problems, known as SA demons, and three new demons of importance to our domain. We then used these SA cards to equip domain experts with SA knowledge so that they could more fully engage in the design process. We designed a potentially reusable solution for achieving SA in multi-stakeholder, multi-UAV, emergency response applications. \textcopyright{} 2020 ACM.},
publication_stage = {Final},
source = {Scopus}
}
@article{aheleroff2021:mass,
title = {Mass {{Personalisation}} as a {{Service}} in {{Industry}} 4.0: {{A Resilient Response Case Study}}},
shorttitle = {Mass {{Personalisation}} as a {{Service}} in {{Industry}} 4.0},
author = {Aheleroff, Shohin and Mostashiri, Naser and Xu, Xun and Zhong, Ray Y.},
year = {2021},
month = oct,
journal = {Advanced Engineering Informatics},
volume = {50},
pages = {101438},
issn = {1474-0346},
doi = {10.1016/j.aei.2021.101438},
urldate = {2023-01-15},
abstract = {The Fourth Industrial Revolution (Industry 4.0) leads to mass personalisation as an emerging manufacturing paradigm. Mass personalisation focuses on uniquely made products to individuals at scale. Global challenges encourage mass personalisation manufacturing with efficiency competitive to mass production. Driven by individualisation as a trend and enabled by increasing digitalisation, mass personalisation can go beyond today's mass customisation. This paper aims to introduce Mass Personalisation as a Service (MPaaS) to address unique and complex requirements at scale by harnessing Industry 4.0 technologies, including Internet of Things, Additive Manufacturing, Big Data, Cloud Manufacturing, Digital Twin, and Blockchain. A case study for the implementation of MPaaS in personalised face masks is presented. The workforce with constant exposure to contaminants requires personal protective equipment (PPE), such as facemasks, for longer hours resulting in pressure-related ulcers. This prolonged use of PPE highlights the importance of personalisation to avoid ulcers and other related health concerns. Most studies have used Additive Manufacturing for individualisation and cloud capabilities for large-scale manufacturing. This study develops a framework and mathematical model to demonstrate the capability of the proposed solution to address one of the most critical challenges by making personalised face masks as an essential PPE in the critical industrial environment.},
langid = {english},
keywords = {Additive Manufacturing,Cloud Manufacturing,Industry 4.0,Internet of Things,Personalisation,Service Oriented Architecture},
file = {/Users/guru/Zotero/storage/E4ZJICB3/S1474034621001907.html}
}
@article{Ahmed20151193,
type = {Article},
title = {High-Throughput Transmission-Quality-Aware Broadcast Routing in Cognitive Radio Networks},
author = {Ahmed, Ejaz and Qadir, Junaid and Baig, Adeel},
year = {2015},
journal = {Wireless Networks},
volume = {21},
number = {4},
pages = {1193--1210},
doi = {10.1007/s11276-014-0843-6},
abstract = {Cognitive radio is an enabling technology of dynamic spectrum access (DSA) networking. In DSA, unlicensed secondary users can coexist with primary licensed users and can share the radio spectrum opportunistically. Broadcasting is an important networking primitive that is useful for many CRN applications such as control information dissemination, warning notification, etc. Unfortunately, the sporadic channels availability degrades the performance of broadcast routing. The quality of a broadcast transmission on a particular channel depends on the channel quality of all the receivers for the same transmitter. Current broadcast routing protocols lack transmission quality awareness. In this paper, we develop a transmission quality-aware broadcasting framework, comprising algorithm for transmission quality-aware broadcast routing in multi-radio dynamic-spectrum-access CRNs, and formulate a transmission quality metric wherein we consider a receiver-centric view rather than a transmission-centric view. We perform a detailed simulation performance evaluation of our proposed framework using OMNeT++. The proposed broadcast routing algorithm is validated by comparing results with state-of-the-art routing algorithms. Analysis of the results shows average performance gains of approximately 40 \% in throughput and packet delivery ratio. \textcopyright{} 2014, Springer Science+Business Media New York.},
publication_stage = {Final},
source = {Scopus}
}
@article{Akbar2017,
type = {Article},
  title = {A Meta-Model of Software Development Project States: Behavioral Study of Software Projects},
author = {Akbar, Rehan and Hassan, Mohd Fadzil and Abdullah, Azrai},
year = {2017},
journal = {Journal of Software: Evolution and Process},
volume = {29},
number = {4},
doi = {10.1002/smr.1820},
abstract = {Software development project during its lifecycle passes through various states. These states describe the condition, status, and behavior of software projects. In the present study, these states are defined based on the various activities performed during project lifecycle like initial environment setup, requirement analysis, coding, testing, problem resolution, and completion. The taxonomy of these states and substates is defined, and a project states meta-model is designed. The meta-model is composed of states and substates of the software projects. Detailed case studies of real projects have been conducted to validate the states of the meta-model. Evidences are collected; and events and observations are recorded about existence of the states, execution flow, duration, and behavior. The evidences, events, and observations are presented in the sequence to translate them into results. Results show that project states exist in all projects such that each software project passes through these states serially and in particular cases, a few states may exist in parallel. Project states show the status and progress of the software projects. It is found that issues in software projects can effectively be resolved by performing micro project management activities of the projects states. Project states meta-model provides basic structure for deriving new models. Copyright \textcopyright{} 2016 John Wiley \& Sons, Ltd.},
publication_stage = {Final},
source = {Scopus}
}
@inproceedings{Albuquerque202221,
type = {Conference Paper},
title = {Comprehending the Use of Intelligent Techniques to Support Technical Debt Management},
author = {Albuquerque, Danyllo and Guimaraes, Everton and Tonin, Graziela and Perkusich, Mirko and Almeida, Hyggo and Perkusich, Angelo},
year = {2022},
series = {Proceedings - {{International Conference}} on {{Technical Debt}} 2022, {{TechDebt}} 2022},
pages = {21--30},
doi = {10.1145/3524843.3528097},
  abstract = {Technical Debt (TD) refers to the consequences of taking shortcuts when developing software. Technical Debt Management (TDM) becomes complex since it relies on a decision process based on multiple and heterogeneous data, which are not straightforward to be synthesized. In this context, there is a promising opportunity to use Intelligent Techniques to support TDM activities since these techniques explore data for knowledge discovery, reasoning, learning, or supporting decision-making. Although these techniques can be used for improving TDM activities, there is no empirical study exploring this research area. This study aims to identify and analyze solutions based on Intelligent Techniques employed to support TDM activities. A Systematic Mapping Study was performed, covering publications between 2010 and 2020. From 2276 extracted studies, we selected 111 unique studies. We found a positive trend in applying Intelligent Techniques to support TDM activities, being Machine Learning, Reasoning Under Uncertainty, and Natural Language Processing the most recurrent ones. Identification, measurement, and monitoring were the more recurrent TDM activities, whereas Design, Code, and Architectural were the most frequently investigated TD types. Although the research area is up-and-coming, it is still in its infancy, and this study provides a baseline for future research. \textcopyright{} 2022 ACM.},
publication_stage = {Final},
source = {Scopus}
}
@inproceedings{alizadeh2019:refbot,
title = {{{RefBot}}: {{Intelligent}} Software Refactoring Bot},
booktitle = {2019 34th {{IEEE}}/{{ACM}} International Conference on Automated Software Engineering ({{ASE}})},
author = {Alizadeh, Vahid and Ouali, Mohamed Amine and Kessentini, Marouane and Chater, Meriem},
year = {2019},
month = nov,
pages = {823--834},
issn = {2643-1572},
doi = {10.1109/ASE.2019.00081},
abstract = {The adoption of refactoring techniques for continuous integration received much less attention from the research community comparing to root-canal refactoring to fix the quality issues in the whole system. Several recent empirical studies show that developers, in practice, are applying refactoring incrementally when they are fixing bugs or adding new features. There is an urgent need for refactoring tools that can support continuous integration and some recent development processes such as DevOps that are based on rapid releases. Furthermore, several studies show that manual refactoring is expensive and existing automated refactoring tools are challenging to configure and integrate into the development pipelines with significant disruption cost. In this paper, we propose, for the first time, an intelligent software refactoring bot, called RefBot. Integrated into the version control system (e.g. GitHub), our bot continuously monitors the software repository, and it is triggered by any "open" or "merge" action on pull requests. The bot analyzes the files changed during that pull request to identify refactoring opportunities using a set of quality attributes then it will find the best sequence of refactorings to fix the quality issues if any. The bot recommends all these refactorings through an automatically generated pull-request. The developer can review the recommendations and their impacts in a detailed report and select the code changes that he wants to keep or ignore. After this review, the developer can close and approve the merge of the bot's pull request. We quantitatively and qualitatively evaluated the performance and effectiveness of RefBot by a survey conducted with experienced developers who used the bot on both open source and industry projects.},
keywords = {Bot (Internet),Manuals,Measurement,Object oriented modeling,Pipelines,refactoring,Software,Software bot,Software quality,Tools},
file = {/Users/guru/Zotero/storage/9LQ72XWF/Alizadeh et al_2019_RefBot_2019 34th IEEEACM international conference on automated software engineering (ASE).pdf}
}
@inproceedings{Alkubaisy202180,
type = {Conference Paper},
title = {{{ConfIs}}: {{A}} Tool for Privacy and Security Analysis and Conflict Resolution for Supporting {{GDPR}} Compliance through Privacy-by-Design},
author = {Alkubaisy, Duaa and Piras, Luca and {Al-Obeidallah}, Mohammed Ghazi and Cox, Karl and Mouratidis, Haralambos},
year = {2021},
series = {International {{Conference}} on {{Evaluation}} of {{Novel Approaches}} to {{Software Engineering}}, {{ENASE}} - {{Proceedings}}},
volume = {2021-April},
pages = {80--91},
  abstract = {Privacy and security requirements, and their potential conflicts, are becoming increasingly important. Considering them is becoming a necessary part of the design of any software system, starting from the very early stages of requirements engineering and throughout the entire software engineering cycle. In the last few years, this has been even more emphasized and required by the law. A relevant example is the case of the General Data Protection Regulation (GDPR), which requires organizations, and their software engineers, to enforce and guarantee privacy-by-design to make their platforms compliant with the regulation. In this context, complex activities related to privacy and security requirements elicitation, analysis, mapping and identification of potential conflicts, and the individuation of their resolution, become crucial. In the literature, no comprehensive requirements-engineering-oriented tool is available for supporting the requirements analyst. In this paper, we propose ConfIs, a tool for supporting the analyst in performing a process covering these phases in a systematic and interactive way. We present ConfIs and its process with a realistic example from DEFeND, an EU project aiming at supporting organizations in achieving GDPR compliance. In this context, we evaluated ConfIs by involving privacy/security requirements experts, who recognized our tool and method as supportive concerning these complex activities. Copyright \textcopyright{} 2021 by SCITEPRESS - Science and Technology Publications, Lda. All rights reserved},
publication_stage = {Final},
source = {Scopus}
}
@article{alnafessah2021:qualityaware,
title = {Quality-Aware {{DevOps}} Research: {{Where}} Do We Stand?},
author = {Alnafessah, Ahmad and Gias, Alim Ul and Wang, Runan and Zhu, Lulai and Casale, Giuliano and Filieri, Antonio},
year = {2021},
  journal = {IEEE Access},
volume = {9},
pages = {44476--44489},
issn = {2169-3536},
doi = {10.1109/ACCESS.2021.3064867},
  abstract = {DevOps is an emerging paradigm that reduces the barriers between developers and operations teams to offer continuous fast delivery and enable quick responses to changing requirements within the software life cycle. A significant volume of activity has been carried out in recent years with the aim of coupling DevOps stages with tools and methods to improve the quality of the produced software and the underpinning delivery methodology. While the research community has produced a sustained effort by conducting numerous studies and innovative development tools to support quality analyses within DevOps, there is still a limited cohesion between the research themes in this domain and a shortage of surveys that holistically examine quality engineering work within DevOps. In this paper, we address the gap by comprehensively surveying existing efforts in this area, categorizing them according to the stage of the DevOps lifecycle to which they primarily contribute. The survey holistically spans all the DevOps stages, identifying research efforts to improve architectural design, modeling and infrastructure-as-code, continuous-integration/continuous-delivery (CI/CD), testing and verification, and runtime management. Our analysis also outlines possible directions for future work in quality-aware DevOps, looking in particular at AI for DevOps and DevOps for AI software.},
keywords = {artificial intelligence,Artificial intelligence,CI/CD,Computer architecture,DevOps,infrastructure as code,primary,Production,Software,Software architecture,testing,Testing,Tools,verification},
file = {/Users/guru/Zotero/storage/6JA74Y95/Alnafessah et al_2021_Quality-aware DevOps research_IEEE access practical innovations, open solutions.pdf}
}
@article{Alshammari2022,
type = {Article},
title = {Analytical Evaluation of {{SOA}} and {{SCRUM}} Business Process Management Approaches for {{IoT-Based}} Services Development},
author = {Alshammari, Fahad H.},
year = {2022},
journal = {Scientific Programming},
volume = {2022},
doi = {10.1155/2022/3556809},
abstract = {The SCRUM approach and Service-Oriented Architecture (SOA) framework are critical in assessing the factors that influence the efficiency of a business process and ensuring that business objectives are fulfilled, and the process is on track to meet those objectives. Flexibility and change adoption are critical features for both SCRUM and SOA approaches. Even though both sides encourage agility, the integration of the two independent concepts (SOA is the architectural framework while SCRUM is the development process) should be considered before being used in software management and development projects. This study assessed and analyzed both SCRUM and SOA's diverse and different software architectural frameworks and development methodologies as well as their environment, which is integrated with the context of software project management and development setup for the software development industry. In addition, this study explores the similarities between the SCRUM process model and the SOA architectural framework to see if they are compatible and, if so, how they may be combined to enhance SOA-based projects. This research also looks at how to build and use a SCRUM methodology for large-scale SOA projects. As a result, SCRUM was chosen as the software development methodology for a research and development project based on SOA. In terms of project development and implementation, the complete project structure is made up of eight main parts. \textcopyright{} 2022 Fahad H. Alshammari.},
publication_stage = {Final},
source = {Scopus}
}
@inproceedings{amershi2019:software,
title = {Software Engineering for Machine Learning: {{A}} Case Study},
booktitle = {2019 {{IEEE}}/{{ACM}} 41st International Conference on Software Engineering: {{Software}} Engineering in Practice ({{ICSE-SEIP}})},
author = {Amershi, Saleema and Begel, Andrew and Bird, Christian and DeLine, Robert and Gall, Harald and Kamar, Ece and Nagappan, Nachiappan and Nushi, Besmira and Zimmermann, Thomas},
year = {2019},
month = may,
pages = {291--300},
doi = {10.1109/ICSE-SEIP.2019.00042},
abstract = {Recent advances in machine learning have stimulated widespread interest within the Information Technology sector on integrating AI capabilities into software and services. This goal has forced organizations to evolve their development processes. We report on a study that we conducted on observing software teams at Microsoft as they develop AI-based applications. We consider a nine-stage workflow process informed by prior experiences developing AI applications (e.g., search and NLP) and data science tools (e.g. application diagnostics and bug reporting). We found that various Microsoft teams have united this workflow into preexisting, well-evolved, Agile-like software engineering processes, providing insights about several essential engineering challenges that organizations may face in creating large-scale AI solutions for the marketplace. We collected some best practices from Microsoft teams to address these challenges. In addition, we have identified three aspects of the AI domain that make it fundamentally different from prior software application domains: 1) discovering, managing, and versioning the data needed for machine learning applications is much more complex and difficult than other types of software engineering, 2) model customization and model reuse require very different skills than are typically found in software teams, and 3) AI components are more difficult to handle as distinct modules than traditional software components - models may be "entangled" in complex ways and experience non-monotonic error behavior. We believe that the lessons learned by Microsoft teams will be valuable to other organizations.},
  keywords = {artificial intelligence,Buildings,data,Data models,machine learning,Machine learning,Organizations,primary,process,repeated_study,Software,software engineering,Software engineering},
file = {/Users/guru/Zotero/storage/KGDKNELG/Amershi et al_2019_Software engineering for machine learning_2019 IEEEACM 41st international conference on software engineering Software engineering in practice (ICSE-SEIP).pdf}
}
@inproceedings{amershi2019:softwarea,
title = {Software Engineering for Machine Learning: A Case Study},
shorttitle = {Software Engineering for Machine Learning},
booktitle = {Proceedings of the 41st {{International Conference}} on {{Software Engineering}}: {{Software Engineering}} in {{Practice}}},
author = {Amershi, Saleema and Begel, Andrew and Bird, Christian and DeLine, Robert and Gall, Harald and Kamar, Ece and Nagappan, Nachiappan and Nushi, Besmira and Zimmermann, Thomas},
year = {2019},
month = may,
series = {{{ICSE-SEIP}} '19},
pages = {291--300},
publisher = {{IEEE Press}},
address = {{Montreal, Quebec, Canada}},
doi = {10.1109/ICSE-SEIP.2019.00042},
urldate = {2023-01-15},
abstract = {Recent advances in machine learning have stimulated widespread interest within the Information Technology sector on integrating AI capabilities into software and services. This goal has forced organizations to evolve their development processes. We report on a study that we conducted on observing software teams at Microsoft as they develop AI-based applications. We consider a nine-stage workflow process informed by prior experiences developing AI applications (e.g., search and NLP) and data science tools (e.g. application diagnostics and bug reporting). We found that various Microsoft teams have united this workflow into preexisting, well-evolved, Agile-like software engineering processes, providing insights about several essential engineering challenges that organizations may face in creating large-scale AI solutions for the marketplace. We collected some best practices from Microsoft teams to address these challenges. In addition, we have identified three aspects of the AI domain that make it fundamentally different from prior software application domains: 1) discovering, managing, and versioning the data needed for machine learning applications is much more complex and difficult than other types of software engineering, 2) model customization and model reuse require very different skills than are typically found in software teams, and 3) AI components are more difficult to handle as distinct modules than traditional software components --- models may be "entangled" in complex ways and experience non-monotonic error behavior. We believe that the lessons learned by Microsoft teams will be valuable to other organizations.},
keywords = {AI,data,primary,process,software engineering},
file = {/Users/guru/Zotero/storage/3X2SYXM4/Amershi et al_2019_Software engineering for machine learning_Proceedings of the 41st International Conference on Software Engineering Software Engineering in Practice.pdf}
}
@inproceedings{Amershi2019291,
type = {Conference Paper},
title = {Software Engineering for Machine Learning: {{A}} Case Study},
author = {Amershi, Saleema and Begel, Andrew and Bird, Christian and DeLine, Robert and Gall, Harald and Kamar, Ece and Nagappan, Nachiappan and Nushi, Besmira and Zimmermann, Thomas},
year = {2019},
series = {Proceedings - 2019 {{IEEE}}/{{ACM}} 41st {{International Conference}} on {{Software Engineering}}: {{Software Engineering}} in {{Practice}}, {{ICSE-SEIP}} 2019},
pages = {291--300},
doi = {10.1109/ICSE-SEIP.2019.00042},
  abstract = {Recent advances in machine learning have stimulated widespread interest within the Information Technology sector on integrating AI capabilities into software and services. This goal has forced organizations to evolve their development processes. We report on a study that we conducted on observing software teams at Microsoft as they develop AI-based applications. We consider a nine-stage workflow process informed by prior experiences developing AI applications (e.g., search and NLP) and data science tools (e.g. application diagnostics and bug reporting). We found that various Microsoft teams have united this workflow into preexisting, well-evolved, Agile-like software engineering processes, providing insights about several essential engineering challenges that organizations may face in creating large-scale AI solutions for the marketplace. We collected some best practices from Microsoft teams to address these challenges. In addition, we have identified three aspects of the AI domain that make it fundamentally different from prior software application domains: 1) discovering, managing, and versioning the data needed for machine learning applications is much more complex and difficult than other types of software engineering, 2) model customization and model reuse require very different skills than are typically found in software teams, and 3) AI components are more difficult to handle as distinct modules than traditional software components - models may be 'entangled' in complex ways and experience non-monotonic error behavior. We believe that the lessons learned by Microsoft teams will be valuable to other organizations. \textcopyright{} 2019 IEEE.},
publication_stage = {Final},
source = {Scopus},
keywords = {primary,repeated_study}
}
@inproceedings{amershi2019software,
title = {Software Engineering for Machine Learning: {{A}} Case Study},
booktitle = {2019 {{IEEE}}/{{ACM}} 41st International Conference on Software Engineering: {{Software}} Engineering in Practice ({{ICSE-SEIP}})},
author = {Amershi, Saleema and Begel, Andrew and Bird, Christian and DeLine, Robert and Gall, Harald and Kamar, Ece and Nagappan, Nachiappan and Nushi, Besmira and Zimmermann, Thomas},
year = {2019},
pages = {291--300},
organization = {{IEEE}},
file = {/Users/guru/Zotero/storage/QJ7SC6DZ/Amershi et al. - 2019 - Software Engineering for Machine Learning A Case .pdf}
}
@inproceedings{anh2021:imbalanced,
title = {An Imbalanced Deep Learning Model for Bug Localization},
booktitle = {2021 28th Asia-Pacific Software Engineering Conference Workshops ({{APSEC}} Workshops)},
author = {Anh, Bui Thi Mai and Luyen, Nguyen Viet},
year = {2021},
month = dec,
pages = {32--40},
doi = {10.1109/APSECW53869.2021.00017},
  abstract = {Debugging and locating faulty source files are tedious and time-consuming tasks. To improve productivity and to help developers focus on crucial files, automated bug localization models have been proposed for years. These models recommend buggy source files by ranking them according to their relevance to a given bug report. There are two significant challenges in this research field: (i) narrowing the lexical gap between bug reports which are typically described using natural languages and source files written in programming languages; (ii) reducing the impact of imbalanced data distribution in model training as far fewer source files relate to a given bug report while the majority of them are not relevant. In this paper, we propose a deep neural network model to investigate essential information hidden within bug reports and source files through capturing not only lexical relations but also semantic details as well as domain knowledge features such as historical bug fixings, code change history. To address the skewed class distribution, we apply a focal loss function combined with a bootstrapping method to rectify samples of the minority class within iterative training batches in our proposed model. We assessed the performance of our approach over six large scale Java open-source projects. The empirical results showed that the proposed method outperformed other state-of-the-art models by improving the Mean Average Precision (MAP) and Mean Reciprocal Rank (MRR) scores from 3\% to 11\% and from 2\% to 14\%, respectively.},
keywords = {bootstrapping,bug localization,Computer bugs,Conferences,Deep learning,deep neural network,imbalanced data-set,Location awareness,Neural networks,Productivity,Training},
file = {/Users/guru/Zotero/storage/UPCQUKEM/Anh and Luyen - 2021 - An imbalanced deep learning model for bug localiza.pdf}
}
@inproceedings{Antonini2022,
type = {Conference Paper},
title = {Tiny-{{MLOps}}: {{A}} Framework for Orchestrating {{ML}} Applications at the Far Edge of {{IoT}} Systems},
author = {Antonini, Mattia and Pincheira, Miguel and Vecchio, Massimo and Antonelli, Fabio},
year = {2022},
series = {{{IEEE Conference}} on {{Evolving}} and {{Adaptive Intelligent Systems}}},
volume = {2022-May},
doi = {10.1109/EAIS51927.2022.9787703},
  abstract = {Empowering the Internet of Things devices with Artificial Intelligence capabilities can transform all vertical applications domains within the next few years. Current approaches favor hosting Machine Learning (ML) models on Linux-based single-board computers. Nevertheless, these devices' cost and energy requirements limit the possible application scenarios. Conversely, today's available 32-bit microcontrollers have much lower costs and only need a few milliwatts to operate, making them an energy-efficient and cost-effective alternative. However, the latter devices, usually referred to as far edge devices, have stringent resource constraints and host non-Linux-based embedded real-time operating systems. Therefore, orchestrating such devices executing portions of ML applications represents a major challenge with current tools and frameworks. This paper formally introduces the Tiny-MLOps framework as the specialization of standard ML orchestration practices, including far edge devices in the loop. To this aim, we will tailor each phase of the classical ML orchestration loop to the reduced resources available onboard typical IoT devices. We will rely on the proposed framework to deliver adaptation and evolving capabilities to resource-constrained IoT sensors mounted on an industrial rotary machine to detect anomalies. As a feasibility study, we will show how to programmatically re-deploy ML-based anomaly detection models to far edge devices. Our preliminary experiments measuring the system performance in terms of deployment, loading, and inference latency of the ML models will corroborate the usefulness of our proposal. \textcopyright{} 2022 IEEE.},
publication_stage = {Final},
source = {Scopus}
}
@inproceedings{Apaza201827,
type = {Conference Paper},
title = {{{ERS-Tool}}: {{Hybrid}} Model for Software Requirements Elicitation in {{Spanish}} Language},
author = {Apaza, Ren{\'a}n Dar{\'i}o Gonzales and Barrios, Jhon Edilberto Monroy and Becerra, Diego Alonso Iquira and Quispe, Jos{\'e} Alfredo Herrera},
year = {2018},
series = {{{ACM International Conference Proceeding Series}}},
pages = {27--30},
doi = {10.1145/3220228.3220255},
  abstract = {The nature of the software requirements is subjective and varied. For this reason, the level of complexity increases according to the volume, especially when the requirements are made in a natural language. Therefore, obtaining quality software requirements that are understandable and unambiguous in the Spanish language becomes a necessity. First, a controlled syntax was proposed to express software requirements taking into account the static and dynamic behavior among the different actors of the system, where the expressions are elaborated based on the Backus\textendash Naur form (BNF). Then a set of writing rules was adapted to the Spanish language, creating four additional rules. Finally, the results of the case study had high accuracy in understandability; also the ambiguity of requirements elicitation was reduced. This also improves the development of software engineering activities, since there are no tools available for the elicitation of software requirements in the Spanish language. \textcopyright{} 2018 Association for Computing Machinery.},
publication_stage = {Final},
source = {Scopus}
}
@incollection{atif2021:chapter,
title = {Chapter 1 - {{Artificial Intelligence}} ({{AI}})-Enabled Remote Learning and Teaching Using {{Pedagogical Conversational Agents}} and {{Learning Analytics}}},
booktitle = {Intelligent {{Systems}} and {{Learning Data Analytics}} in {{Online Education}}},
author = {Atif, Amara and Jha, Meena and Richards, Deborah and Bilgin, Ayse A.},
editor = {Caball{\'e}, Santi and Demetriadis, Stavros N. and {G{\'o}mez-S{\'a}nchez}, Eduardo and Papadopoulos, Pantelis M. and Weinberger, Armin},
year = {2021},
month = jan,
series = {Intelligent {{Data-Centric Systems}}},
pages = {3--29},
publisher = {{Academic Press}},
doi = {10.1016/B978-0-12-823410-5.00013-9},
urldate = {2023-01-15},
abstract = {Advancements in Artificial Intelligence (AI) have potentially created new ways to teach and learn, such as the use of Learning Analytics (LA) to monitor and support students using data captured in Learning Management Systems (LMS). To add human rather than data-based support, in this chapter, we present our use of AI-enabled Pedagogical Conversational Agents (CAs) over the past 12 months in multiple units/subjects across two universities. These CAs play a role similar to a teacher or peer learner by sharing the expertise they have acquired from the knowledge contained in student\textendash teacher social interactions in LMS forums and grade book teacher feedback. Unlike teachers or peers, these CAs can be contacted anonymously at any time, they don't mind being asked the same question repeatedly and they can empower students to explore options and outcomes. We conclude the chapter with a discussion of the potential of LA to automate CA interactions.},
isbn = {978-0-12-823410-5},
langid = {english},
keywords = {artificial intelligence,higher education,learning analytics,learning management system,Pedagogical conversational agents,remote teaching and learning,student engagement},
file = {/Users/guru/Zotero/storage/WP34S6DH/B9780128234105000139.html}
}
@inproceedings{Ausmanas2012175,
type = {Conference Paper},
title = {Operations Engineering and Management in {{OHMS}} Company},
author = {Ausmanas, Naglis and Bargelis, Algirdas},
year = {2012},
series = {International {{Conference}} on {{Industrial Logistics}}, {{ICIL}} 2012 - {{Conference Proceedings}}},
pages = {175--181},
  abstract = {Global industrial logistics tasks are to seek and help better operations engineering and management in order-handled manufacturing systems (OHMS). The industrial logistics in lean manufacturing approach is considered in this paper. Collaboration in the in-bound, internal, and out-bound areas among Lithuanian producers is considered in this research. The forecasting model for manufacturing process, time and cost has been examined in this research. An appropriate knowledge base and intelligent software have been applied and tested for manufacturing cost forecasting at the early stage of new order engineering.},
publication_stage = {Final},
source = {Scopus}
}
@inproceedings{Ayala2022,
type = {Conference Paper},
title = {{{DOGO4ML}}: {{Development}}, Operation and Data Governance for {{ML-based}} Software Systems},
author = {Ayala, Claudia and Bilalli, Besim and G{\'o}mez, Cristina and {Mart{\'i}nez-Fern{\'a}ndez}, Silverio},
year = {2022},
series = {{{CEUR Workshop Proceedings}}},
volume = {3144},
abstract = {Machine Learning based Software Systems (MLSS) are becoming increasingly pervasive in today's society and can be found in virtually every domain. Building MLSS is challenging due to their interdisciplinary nature. MLSS engineering encompasses multiple disciplines, of which Data Engineering and Software Engineering appear as most relevant. The DOGO4ML project aims at reconciling these two disciplines for providing a holistic end-to-end framework to develop, operate and govern MLSS and their data. It proposes to combine and intertwine two software cycles: the DataOps and the DevOps lifecycles. The DataOps lifecycle manages the complexity of dealing with the big data needed by ML models, while the DevOps lifecycle is in charge of building the system that embeds these models. In this paper, we present the main vision and goals of the project as well as its expected contributions and outcomes. Although the project is in its initial stage, the progress of the research undertaken so far is detailed. \textcopyright{} 2021 The Authors.},
publication_stage = {Final},
source = {Scopus},
keywords = {primary},
file = {/Users/guru/Zotero/storage/4KRPVF92/RP-paper3.pdf;/Users/guru/Zotero/storage/TA6RWSUI/Ayala et al. - 2022 - DOGO4ML Development, operation and data governanc.pdf}
}
@article{bader2021ai,
title = {{{AI}} in Software Engineering at {{Facebook}}},
author = {Bader, Johannes and Kim, Sonia Seohyun and Luan, Frank Sifei and Chandra, Satish and Meijer, Erik},
year = {2021},
journal = {IEEE Software},
volume = {38},
number = {4},
pages = {52--61},
publisher = {{IEEE}}
}
@article{bamhdi2021:requirements,
title = {Requirements Capture and Comparative Analysis of Open Source versus Proprietary Service Oriented Architecture},
author = {Bamhdi, Alwi},
year = {2021},
month = feb,
journal = {Computer Standards \& Interfaces},
volume = {74},
pages = {103468},
issn = {0920-5489},
doi = {10.1016/j.csi.2020.103468},
urldate = {2023-01-15},
abstract = {Service Oriented Architecture (SOA) integrates information systems towards an agile and reusable service-based connectivity. It is an approach amalgamating large scale private/public computer systems and other resources with continuous phenomenal advent evolution and leveraging of the World Wide Web (WWW, commonly referred to as the Web) social media, mobile communications, Big Data (BD), data analytics, Machine Learning (ML) based optimisation, Cloud Computing (CC) and Internet of Things (IoT), commonly known as Advanced Technologies (AT). Implementing SOA, whether Open Source Software (OSS) or proprietary or absolute freeware is a choice to be made which depends on the organisation's requirements in light of AT as well as a host of delivery and security concerns. In this paper, a comparative analysis of an open source vs. proprietary SOA for large scale computer systems servicing AT is presented by examining their main efficacies, features, advantages and disadvantages and capturing their generic technical functional and non-functional requirements in a unified manner. Furthermore, the SOA evaluation criteria, recommendations and conclusions are also presented.},
langid = {english},
  keywords = {Advanced technologies,Closed source proprietary,Open source,Requirements capture,SOA,Web 1.0/2.0/3.0/4.0/5.0/6.0,Web services access},
file = {/Users/guru/Zotero/storage/9B7H9EIE/S0920548920303548.html}
}
@inproceedings{barati2019enhancing,
title = {Enhancing User Privacy in {{IoT}}: Integration of {{GDPR}} and Blockchain},
booktitle = {International Conference on Blockchain and Trustworthy Systems},
author = {Barati, Masoud and Rana, Omer},
year = {2019},
pages = {322--335},
organization = {{Springer}}
}
@inproceedings{Bartsch2010495,
type = {Conference Paper},
title = {Supporting Authorization Policy Modification in Agile Development of Web Applications},
author = {Bartsch, Steffen},
year = {2010},
series = {{{ARES}} 2010 - 5th {{International Conference}} on {{Availability}}, {{Reliability}}, and {{Security}}},
pages = {495--500},
doi = {10.1109/ARES.2010.19},
abstract = {Web applications are increasingly developed in Agile development processes. Business-centric Web applications need complex authorization policies to securely implement business processes. As part of the Agile process, integrating domain experts into the development of RBAC authorization policies improves the policies, but remains difficult. For policy modifications, high numbers of options need to be considered. To ease the management task and integrate domain experts, we propose an algorithm and prototype tool. The AI-based change-support algorithm helps to find the suitable modification actions according to desired changes that are given in policy test cases. We also present a prototype GUI for domain experts to employ the algorithm and report on early results of non-security experts using the tool in a real-world business Web application. \textcopyright{} 2010 IEEE.},
publication_stage = {Final},
source = {Scopus}
}
@article{bashari2018:selfadaptation,
title = {Self-Adaptation of Service Compositions through Product Line Reconfiguration},
author = {Bashari, Mahdi and Bagheri, Ebrahim and Du, Weichang},
year = {2018},
month = oct,
journal = {Journal of Systems and Software},
volume = {144},
pages = {84--105},
issn = {0164-1212},
doi = {10.1016/j.jss.2018.05.069},
urldate = {2023-01-15},
  abstract = {The large number of published services has motivated the development of tools for creating customized composite services known as service compositions. While service compositions provide high agility and development flexibility, they can also pose challenges when it comes to delivering guaranteed functional and non-functional requirements. This is primarily due to the highly dynamic environment in which services operate. In this paper, we propose adaptation mechanisms that are able to effectively maintain functional and non-functional quality requirements in service compositions derived from software product lines. Unlike many existing works, the proposed adaptation mechanism does not require explicit user-defined adaptation strategies. We adopt concepts from the software product line engineering paradigm where service compositions are viewed as a collection of features and adaptation happens through product line reconfiguration. We have practically implemented the proposed mechanism in our Magus tool suite and performed extensive experiments, which show that our work is both practical and efficient for automatically adapting service compositions once violations of functional or non-functional requirements are observed.},
langid = {english},
keywords = {Feature model,Self adaptation,Service composition,Software product lines}
}
@inproceedings{Basu202116,
type = {Conference Paper},
title = {Designing a Bot for Efficient Distribution of Service Requests},
author = {Basu, Arkadip and Banerjee, Kunal},
year = {2021},
series = {Proceedings - 2021 {{IEEE}}/{{ACM}} 3rd {{International Workshop}} on {{Bots}} in {{Software Engineering}}, {{BotSE}} 2021},
pages = {16--20},
doi = {10.1109/BotSE52550.2021.00011},
  abstract = {The tracking and timely resolution of service requests is one of the major challenges in agile project management. Having an efficient solution to this problem is a key requirement for Walmart to facilitate seamless collaboration across its different business units. The Jira software is one of the popular choices in industries for monitoring such service requests. A service request once logged into the system by a reporter is referred to as a (Jira) ticket which is assigned to an engineer for servicing. In this work, we explore how the tickets which may arise in any of the Walmart stores and offices distributed over several countries can be assigned to engineers efficiently. Specifically, we will discuss how the introduction of a bot for automated ticket assignment has helped in reducing the disparity in ticket assignment to engineers by human managers and also decreased the average ticket resolution time, thereby improving the experience for both the reporters and the engineers. Additionally, the bot sends reminders and status updates over different business communication platforms for timely tracking of tickets; it can be suitably modified to provision for human intervention in case of special needs by some teams. The current study conducted over data collected from various teams within Walmart shows the efficacy of our bot. \textcopyright{} 2021 IEEE.},
publication_stage = {Final},
source = {Scopus}
}
@incollection{batarseh2018:chapter,
title = {Chapter 9 - {{A Deployment Life Cycle Model}} for {{Agricultural Data Systems Using Kansei Engineering}} and {{Association Rules}}},
booktitle = {Federal {{Data Science}}},
author = {Batarseh, Feras A. and Yang, Ruixin},
editor = {Batarseh, Feras A. and Yang, Ruixin},
year = {2018},
month = jan,
pages = {141--159},
publisher = {{Academic Press}},
doi = {10.1016/B978-0-12-812443-7.00009-0},
urldate = {2023-01-15},
abstract = {The early promise of artificial intelligence that was sought by many is now partially drifting toward data science and big data analytics. Software engineers have been increasingly overwhelmed by the amounts of data available to them during development, but their main quandary is that they are unable to draw solid conclusions from that data without proper analytics. As a result of the recent evolution of software engineering, analytics have been directly applied to solve different challenges of the software development life cycle. Rarely however, have analytics been applied directly for deployment or user acceptance. This chapter introduces a life cycle model that uses the power of data analytics (i.e., association rules and Kansei) to guide a federal team through software testing, deployment, and user acceptance. The model is evaluated through an agricultural data system and is deployed to federal employees; experimental results are collected and presented.},
isbn = {978-0-12-812443-7},
langid = {english},
keywords = {Association rules,Data analytics,Federal analyst,Kansei engineering,Software testing,System deployment,User adoption},
file = {/Users/guru/Zotero/storage/U5FKQHSN/B9780128124437000090.html}
}
@incollection{batarseh2020:10,
title = {10 - {{The}} Application of Artificial Intelligence in Software Engineering: A Review Challenging Conventional Wisdom},
shorttitle = {10 - {{The}} Application of Artificial Intelligence in Software Engineering},
booktitle = {Data {{Democracy}}},
author = {Batarseh, Feras A. and Mohod, Rasika and Kumar, Abhinav and Bui, Justin},
editor = {Batarseh, Feras A. and Yang, Ruixin},
year = {2020},
month = jan,
pages = {179--232},
publisher = {{Academic Press}},
doi = {10.1016/B978-0-12-818366-3.00010-1},
urldate = {2023-01-15},
  abstract = {The field of artificial intelligence (AI) is witnessing a recent upsurge in research, tools development, and deployment of applications. Multiple software companies are shifting their focus to developing intelligent systems; and many others are deploying AI paradigms to their existing processes. In parallel, the academic research community is injecting AI paradigms to provide solutions to traditional engineering problems. Similarly, AI has evidently been proved useful to software engineering (SE). When one observes the SE phases (requirements, design, development, testing, release, and maintenance), it becomes clear that multiple AI paradigms (such as neural networks, machine learning, knowledge-based systems, natural language processing) could be applied to improve the process and eliminate many of the major challenges that the SE field has been facing. This survey chapter is a review of the most commonplace methods of AI applied to SE. The review covers methods between the years 1975\textendash 2017: for the requirements phase, 46 major AI-driven methods are found; 19 for design; 15 for development; 68 for testing; and 15 for release and maintenance. Furthermore, the purpose of this chapter is threefold; firstly, to answer the following questions: is there sufficient intelligence in the SE lifecycle? What does applying AI to SE entail? Secondly, to measure, formulize, and evaluate the overlap of SE phases and AI disciplines. Lastly, this chapter aims to pose serious questions challenging the current conventional wisdom (i.e., status quo) of the state-of-the-art, craft a call for action, and to redefine the path forward.},
isbn = {978-0-12-818366-3},
langid = {english},
keywords = {Artificial intelligence paradigms,Design,Lifecycle phase,Requirements,Testing},
file = {/Users/guru/Zotero/storage/ELSX763V/B9780128183663000101.html}
}
@article{bates2020:literature,
title = {Literature {{Listing}}},
author = {Bates, Susan},
year = {2020},
month = mar,
journal = {World Patent Information},
volume = {60},
pages = {101946},
issn = {0172-2190},
doi = {10.1016/j.wpi.2019.101946},
urldate = {2023-01-15},
abstract = {The quarterly Literature Listing is intended as a current awareness service for readers indicating newly published books, journal and conference articles on: patent search techniques, databases, analysis and classifications; patent searcher certification; patents relating to a) life sciences and pharmaceuticals and b) software; patent policy and strategic issues; trade marks; designs; domain names; and articles reviewing historical aspects of intellectual property or reviewing specific topics/persons. The current Literature Listing was compiled end-November 2019. Key resources used are Scopus, Digital Commons, publishers' RSS feeds, and serendipity! Please feel free to send the author details of newly published reports/monographs/books for potential inclusion.},
langid = {english},
keywords = {Current awareness,Designs,Literature listing,Patent analysis,Patents,Trade marks},
file = {/Users/guru/Zotero/storage/I3D5TY6R/Bates_2020_Literature Listing_World Patent Information.pdf;/Users/guru/Zotero/storage/JM5RGJSR/S017221901930153X.html}
}
@article{Bauer2020445,
type = {Article},
title = {Modular Change Impact Analysis in Factory Systems: {{Guideline}} for Individual Configuration},
author = {Bauer, Harald and Haase, Paul and Sippl, Fabian and Ramakrishnan, Robert and Schilp, Johannes and Reinhart, Gunther},
year = {2020},
journal = {Production Engineering},
volume = {14},
number = {4},
pages = {445--456},
doi = {10.1007/s11740-020-00979-4},
abstract = {Shorter product innovation cycles, high variant products, and demand fluctuation, as well as equipment life cycles and technology life cycles force manufacturing companies to regularly change their manufacturing system. In order to address this challenge, an efficient and structured change management is required. As change causes and factory elements are connected via a complex network of relations and flows, an essential step in change management is the evaluation of considered adjustments with regard to their effects on the current production system. Depending on the context of the application, change impact analysis must process specific inputs and deliver different results. Current approaches, however, each focus only on selected aspects of the versatility of change effects. To address this challenge, this paper presents a modular approach for the individual design of change impact analysis. \textcopyright{} 2020, The Author(s).},
publication_stage = {Final},
source = {Scopus}
}
@inproceedings{bayar2022:comprehensive,
title = {A Comprehensive Big Data Framework for Energy Markets: {{Oil}} and Gas Demand Forecasting},
booktitle = {2022 3rd International Informatics and Software Engineering Conference ({{IISEC}})},
author = {Bayar, Alp and Ko{\c c}, Burcu and G{\"o}kalp, Mert Onuralp and {\"O}zden, Baran and Yeldan, Yi{\u g}it and Eren, P. Erhan and Ko{\c c}yi{\u g}it, Altan},
year = {2022},
month = dec,
pages = {1--6},
doi = {10.1109/IISEC56263.2022.9998216},
abstract = {Demand forecasting in the energy sector is essential for both countries and companies to plan their supply and demand. Agents in the highly volatile oil markets have to act fast and in a data-driven manner. In the literature, studies on oil or gasoline demand forecasting are carried out using traditional econometric and AI-based models, using static data for long periods. In this paper, short-term gasoline demand forecasting literature has been investigated. We focus on short-term demand prediction based on big data analytics and investigate potential data sources and architectures to collect data. To this end, several iterative meetings were conducted between the Data Science department, the Oil Trading department of an Oil \& Gas company, and researchers within an industry-academia cooperation project. Traditional data sources used for the problem are presented, and the applicability of real-time data to the problem is discussed. A big data architecture is proposed that can be used to predict the demand for petroleum products, mainly for gasoline in the U.S., given the transparency, amplitude, and availability of open data.},
keywords = {Big Data,big data analytics,Companies,Computer architecture,demand,Demand forecasting,forecasting,gasoline,oil,Oils,Soft sensors,Supply and demand}
}
@inproceedings{begel2019:best,
title = {Best Practices for Engineering {{AI-Infused}} Applications: {{Lessons}} Learned from Microsoft Teams},
booktitle = {2019 {{IEEE}}/{{ACM}} Joint 7th International Workshop on Conducting Empirical Studies in Industry ({{CESI}}) and 6th International Workshop on Software Engineering Research and Industrial Practice ({{SER}}\&{{IP}})},
author = {Begel, Andrew},
year = {2019},
month = may,
pages = {1--1},
issn = {2575-4793},
doi = {10.1109/CESSER-IP.2019.00008},
abstract = {Artificial intelligence and machine learning (AI/ML) are some of the newest trends to hit the software industry, compelling organizations to evolve their development processes to deliver novel products to their customers. In this talk, I describe a study in which we learned how Microsoft software teams develop AI/ML-based applications using a nine-stage AI workflow process informed by prior experiences developing early AI applications (e.g. search and NLP) and data science tools (e.g. application telemetry and bug reporting). Adapting this workflow into their pre-existing, well-evolved, Agile-like software engineering processes and job roles has resulted in a number of engineering challenges unique to the AI/ML domain, some universal to all teams, but others related to the amount of prior AI/ML experience and education the teams have. I tell you about some challenges and the solutions that teams have come up with. The lessons that Microsoft has learned can help other organizations embarking on their own path towards AI and ML.},
keywords = {AI,Conferences,Industries,Industry Practice,Machine Learning,Organizations,primary,Software,Software engineering},
file = {/Users/guru/Zotero/storage/99EEE6WJ/Begel - 2019 - Best practices for engineering AI-Infused applicat.pdf}
}
@article{behera2023:responsible,
title = {Responsible Natural Language Processing: {{A}} Principlist Framework for Social Benefits},
shorttitle = {Responsible Natural Language Processing},
author = {Behera, Rajat Kumar and Bala, Pradip Kumar and Rana, Nripendra P. and Irani, Zahir},
year = {2023},
month = mar,
journal = {Technological Forecasting and Social Change},
volume = {188},
pages = {122306},
issn = {0040-1625},
doi = {10.1016/j.techfore.2022.122306},
urldate = {2023-01-15},
abstract = {Businesses harness the power of natural language processing (NLP) to automate processes and make data-driven decisions. However, NLP raises concerns on a number of fronts due to its potential for disruption, which can be addressed with the assignment of responsibility. Therefore, responsible NLP (RNLP) can be designed as a principlist framework to ensure NLP systems are used in an ethical manner. The study proposes a principlist framework with the formulation of eight principlist ethical principles to ensure NLP is safe, secure and reliable for responsible decision making and subsequently results in social benefits. Using snowball sampling, data are collected from 15 informants, who represent senior-level positions in diversified industries. The analysis is performed with qualitative research methodology. The result produces two ethical practices. First is the adoption of RNLP as a disruptive technology for ethical decision making for social benefits and second is the creation of a culture of responsibility.},
langid = {english},
keywords = {Ethical work climate,Ethics,Responsible decision making,Responsible NLP,Social benefits},
file = {/Users/guru/Zotero/storage/22EBQJQB/S0040162522008277.html}
}
@inproceedings{belani2019requirements,
title = {Requirements Engineering Challenges in Building {{AI-based}} Complex Systems},
booktitle = {2019 {{IEEE}} 27th International Requirements Engineering Conference Workshops ({{REW}})},
author = {Belani, Hrvoje and Vukovic, Marin and Car, {\v Z}eljka},
year = {2019},
pages = {252--255},
organization = {{IEEE}}
}
@article{bharosa2022:rise,
title = {The Rise of {{GovTech}}: {{Trojan}} Horse or Blessing in Disguise? {{A}} Research Agenda},
shorttitle = {The Rise of {{GovTech}}},
author = {Bharosa, Nitesh},
year = {2022},
month = jul,
journal = {Government Information Quarterly},
volume = {39},
number = {3},
pages = {101692},
issn = {0740-624X},
doi = {10.1016/j.giq.2022.101692},
urldate = {2023-01-15},
abstract = {As GovTech solutions are steadily entering the public sector, they have yet to find their way into the mainstream literature. GovTech refers to socio-technical solutions \textendash{} that are developed and operated by private organisations \textendash{} intertwined with public sector components for facilitating processes in the public sector. GovTech solutions promise a superior customer journey for citizens and businesses compared to current government portals and front desks. GovTech solutions can be a blessing in disguise for governments struggling in their digital transformation journey, carrying the burden of public service innovation and replacing legacy systems with modern GovTech solutions. Yet, there are also concerns that GovTech solutions are a Trojan horse, exploiting the lack of technical knowledge at public agencies and shifting decision-making power from public agencies to market parties, thereby undermining digital sovereignty and public values. This paper develops a research agenda for GovTech based on a conceptual framework. This framework reveals four interrelated design areas for GovTech: institutional, governance, technical and human-centred design. Governments can employ the conceptual framework to further align and develop their strategies by focussing on GovTech governance, referring to the ability to manage the various interdependencies between the four design areas.},
langid = {english},
keywords = {Co-creation,Digital transformation,GovTech,Multiple-helix,Public service innovation,Trust frameworks},
file = {/Users/guru/Zotero/storage/AJ38BWLR/Bharosa_2022_The rise of GovTech_Government Information Quarterly.pdf;/Users/guru/Zotero/storage/IMG7JHKY/S0740624X22000259.html}
}
@article{biesialska2021:big,
title = {Big {{Data}} Analytics in {{Agile}} Software Development: {{A}} Systematic Mapping Study},
shorttitle = {Big {{Data}} Analytics in {{Agile}} Software Development},
author = {Biesialska, Katarzyna and Franch, Xavier and {Munt{\'e}s-Mulero}, Victor},
year = {2021},
month = apr,
journal = {Information and Software Technology},
volume = {132},
pages = {106448},
issn = {0950-5849},
doi = {10.1016/j.infsof.2020.106448},
urldate = {2023-01-15},
abstract = {Context: Over the last decade, Agile methods have changed the software development process in an unparalleled way and with the increasing popularity of Big Data, optimizing development cycles through data analytics is becoming a commodity. Objective: Although a myriad of research exists on software analytics as well as on Agile software development (ASD) practice itself, there exists no systematic overview of the research done on ASD from a data analytics perspective. Therefore, the objective of this work is to make progress by linking ASD with Big Data analytics (BDA). Method: As the primary method to find relevant literature on the topic, we performed manual search and snowballing on papers published between 2011 and 2019. Results: In total, 88 primary studies were selected and analyzed. Our results show that BDA is employed throughout the whole ASD lifecycle. The results reveal that data-driven software development is focused on the following areas: code repository analytics, defects/bug fixing, testing, project management analytics, and application usage analytics. Conclusions: As BDA and ASD are fast-developing areas, improving the productivity of software development teams is one of the most important objectives BDA is facing in the industry. This study provides scholars with information about the state of software analytics research and the current trends as well as applications in the business environment. Meanwhile, thanks to this literature review, practitioners should be able to understand better how to obtain actionable insights from their software artifacts and on which aspects of data analytics to focus when investing in such initiatives.},
langid = {english},
keywords = {Agile software development,Artificial intelligence,Data analytics,Literature review,Machine learning,Software analytics},
file = {/Users/guru/Zotero/storage/EQNE33MC/S0950584920301981.html}
}
@inproceedings{Bilgaiyan2016112,
type = {Conference Paper},
title = {A Review of Software Cost Estimation in Agile Software Development Using Soft Computing Techniques},
author = {Bilgaiyan, Saurabh and Mishra, Samaresh and Das, Madhabananda},
year = {2016},
series = {Proceedings - {{International Conference}} on {{Computational Intelligence}} and {{Networks}}},
volume = {2016-January},
pages = {112--117},
doi = {10.1109/CINE.2016.27},
abstract = {For a successful software project, accurate prediction of its overall effort and cost estimation is an essential task. Software projects have evolved through a number of development models over the last few decades. Hence, to cover an accurate measurement of the effort and cost for different software projects based on different development models having new and innovative phases of software development is a crucial task to be done. An accurate prediction always leads to a successful software project within the budget with no delay, but any percentage of misconduct in the overall effort and cost estimate may lead to a project failure in terms of delivery time, budget or features. Software industries have adopted various development models based on the project requirements and organization's capabilities. Due to adaptability to changes in a software project, the agile software development model has become a very successful and popular framework for development over the last decade. The customer is involved as an active participant in the development using an agile framework. Hence, changes can occur at any phase of development and they can be dynamic in nature. That is why an accurate prediction of effort and cost of such projects is a crucial task to be done as the complexity of the overall development structure increases with time. Soft computing techniques have proven that they are one of the best problem solving techniques in such scenarios. Such techniques are more flexible and the presence of bio-intelligence increases their accuracy. Genetic Algorithm (GA), Particle Swarm Optimization (PSO), Artificial Neural Network (ANN), Fuzzy Inference Systems (FIS), etc. are applied successfully for estimation of cost and effort of agile based software projects. This paper deals with such soft computing techniques and provides a detailed and analytical overview of such methods. It also provides the future scope and possibilities to explore such techniques on the basis of the survey provided by this paper. \textcopyright{} 2016 IEEE.},
publication_stage = {Final},
source = {Scopus}
}
@article{blackwell2019:fifty,
title = {Fifty Years of the Psychology of Programming},
author = {Blackwell, Alan F. and Petre, Marian and Church, Luke},
year = {2019},
month = nov,
journal = {International Journal of Human-Computer Studies},
series = {50 Years of the {{International Journal}} of {{Human-Computer Studies}}. {{Reflections}} on the Past, Present and Future of Human-Centred Technologies},
volume = {131},
pages = {52--63},
issn = {1071-5819},
doi = {10.1016/j.ijhcs.2019.06.009},
urldate = {2023-01-15},
abstract = {This paper reflects on the evolution (past, present and future) of the `psychology of programming' over the 50 year period of this anniversary issue. The International Journal of Human-Computer Studies (IJHCS) has been a key venue for much seminal work in this field, including its first foundations, and we review the changing research concerns seen in publications over these five decades. We relate this thematic evolution to research taking place over the same period within more specialist communities, especially the Psychology of Programming Interest Group (PPIG), the Empirical Studies of Programming series (ESP), and the ongoing community in Visual Languages and Human-Centric Computing (VL/HCC). Many other communities have interacted with psychology of programming, both influenced by research published within the specialist groups, and in turn influencing research priorities. We end with an overview of the core theories that have been developed over this period, as an introductory resource for new researchers, and also with the authors' own analysis of key priorities for future research.},
langid = {english},
keywords = {Attention investment,Programming languages,Psychology of programming}
}
@article{blake2021:impact,
title = {Impact of {{Artificial Intelligence}} on {{Engineering}}: {{Past}}, {{Present}} and {{Future}}},
shorttitle = {Impact of {{Artificial Intelligence}} on {{Engineering}}},
author = {Blake, Robert W. and Mathew, Robins and George, Abraham and Papakostas, Nikolaos},
year = {2021},
month = jan,
journal = {Procedia CIRP},
series = {54th {{CIRP CMS}} 2021 - {{Towards Digitalized Manufacturing}} 4.0},
volume = {104},
pages = {1728--1733},
issn = {2212-8271},
doi = {10.1016/j.procir.2021.11.291},
urldate = {2023-01-15},
abstract = {Recent advancements in cloud computing and software technology have resulted in the development of powerful Artificial Intelligence (AI) tools for engineering applications. However, the impact of AI in future engineering jobs remains ambiguous. This paper discusses recent AI developments, AI applications, the influence of AI on the Engineering profession, and the productivity of engineers. In addition, ethics and professional impacts to be considered with the introduction of AI are addressed. The results of a survey conducted among people from Engineering colleges across Ireland are also presented.},
langid = {english},
keywords = {Artificial Intelligence,Deep Learning,Ethics,Industry 4.0,Machine Learning},
file = {/Users/guru/Zotero/storage/HTNQU6SJ/Blake et al_2021_Impact of Artificial Intelligence on Engineering_Procedia CIRP.pdf;/Users/guru/Zotero/storage/GN7IKAYB/S2212827121011896.html}
}
@inproceedings{Boerstra2022305,
type = {Conference Paper},
title = {Stronger Together: {{On}} Combining Relationships in Architectural Recovery Approaches},
author = {Boerstra, Evelien and Ahn, John and Rubin, Julia},
year = {2022},
series = {Proceedings - 2022 {{IEEE International Conference}} on {{Software Maintenance}} and {{Evolution}}, {{ICSME}} 2022},
pages = {305--316},
doi = {10.1109/ICSME55016.2022.00035},
abstract = {Architecture recovery is the process of obtaining the intended architecture of a software system by analyzing its implementation. Most existing architectural recovery approaches rely on extracting information about relationships between code entities and then use the extracted information to group closely related entities together. The approaches differ by the type of relationships they consider, e.g., method calls, data dependencies, and class name similarity. Prior work shows that combining multiple types of relationships during the recovery process is often beneficial as it leads to a better result than the one obtained by using the relationships individually. Yet, most, if not all, academic and industrial architecture recovery approaches simply unify the combined relationships to produce a more complete representation of the analyzed systems. In this paper, we propose and evaluate an alternative approach to combining information derived from multiple relationships, which is based on identifying agreements/disagreements between relationship types. We discuss advantages and disadvantages of both approaches and provide suggestions for future research in this area. \textcopyright{} 2022 IEEE.},
publication_stage = {Final},
source = {Scopus}
}
@inproceedings{Borg202222,
type = {Conference Paper},
title = {Quality Assurance of Generative Dialog Models in an Evolving Conversational Agent Used for Swedish Language Practice},
author = {Borg, Markus and Bengtsson, Johan and Osterling, Harald and Hagelborn, Alexander and Gagner, Isabella and Tomaszewski, Piotr},
year = {2022},
series = {Proceedings - 1st {{International Conference}} on {{AI Engineering}} - {{Software Engineering}} for {{AI}}, {{CAIN}} 2022},
pages = {22--32},
doi = {10.1145/3522664.3528592},
abstract = {Due to the migration megatrend, efficient and effective second-language acquisition is vital. One proposed solution involves AI-enabled conversational agents for person-centered interactive language practice. We present results from ongoing action research targeting quality assurance of proprietary generative dialog models trained for virtual job interviews. The action team elicited a set of 38 requirements for which we designed corresponding automated test cases for 15 of particular interest to the evolving solution. Our results show that six of the test case designs can detect meaningful differences between candidate models. While quality assurance of natural language processing applications is complex, we provide initial steps toward an automated framework for machine learning model selection in the context of an evolving conversational agent. Future work will focus on model selection in an MLOps setting. \textcopyright{} 2022 ACM.},
publication_stage = {Final},
source = {Scopus}
}
@article{borsting2022:software,
title = {Software {{Engineering}} for {{Augmented Reality}} - {{A Research Agenda}}},
author = {B{\"o}rsting, Ingo and Heikamp, Markus and Hesenius, Marc and Koop, Wilhelm and Gruhn, Volker},
year = {2022},
month = jun,
journal = {Proceedings of the ACM on Human-Computer Interaction},
volume = {6},
number = {EICS},
pages = {155:1--155:34},
doi = {10.1145/3532205},
urldate = {2023-01-15},
abstract = {Augmented reality changes the way we perceive reality and how we interact with computers. However, we argue that to create augmented reality solutions, we need to rethink the way we develop software. In this paper, we review the state of the art in software engineering for augmented reality applications, derive open questions, and define a research agenda. For this purpose, we consider different engineering phases and evaluate conventional techniques regarding their applicability for AR development. In requirements engineering, we found the integration of AR experts and the associated collaboration between actors to be a key aspect of the development process. Additionally, requirements about the physical world must be considered, which in turn has a huge impact on UI design. The relevance of the physical environment is not yet sufficiently addressed in applicable techniques, which also applies to current implementation frameworks and tools, complicating the AR development process. When evaluating AR software iterations, we found interaction testing and test automation to have great potential, although they have not yet been sufficiently researched. Our paper contributes to AR research by revealing current core challenges within the AR development process and formulating explicit research questions that should be considered by future research.},
keywords = {augmented reality,frameworks,methods,software engineering,tools}
}
@article{brad2014:smart,
title = {Smart {{Deployment}} of {{Demonstrators}} into {{Successful Commercial Solutions}}},
author = {Brad, Stelian and Fulea, Mircea and Brad, Emilia and Mocan, Bogdan},
year = {2014},
month = jan,
journal = {Procedia CIRP},
series = {24th {{CIRP Design Conference}}},
volume = {21},
pages = {503--508},
issn = {2212-8271},
doi = {10.1016/j.procir.2014.03.137},
urldate = {2023-01-15},
abstract = {Product or service concepts based on emerging technologies are usually results of research projects, be they performed by academic groups or by research departments of companies. Many times, the prototypes or demonstrators that result from such projects are supposed to evolve into commercial products or services, but \textendash{} at least in the first stage \textendash{} there is more focus on proving key features of a technology, or the effectiveness / efficiency / applicability of various concepts or algorithms. However, evolving into commercial products is many times at least as challenging as building the prototypes. In the case of software-based projects, this means changes in architecture, a lot of code rewriting and important usability improvements. This paper introduces a software-concept product design algorithm which aims to minimize the effort required in turning a demonstrator into a commercial product. This is done by generating two functionality sets: a pure demonstrator and a pure commercial one, then generating a hybrid functionality set with the corresponding architecture, and then assessing each functionality for the demonstrator and the commercial version in terms of development and improvement effort. Through iterations, in which the original functionality sets are improved, the difference between the two perspectives will be reduced until it gets below a reasonable limit in terms of effort. The paper presents a case study in which the algorithm is applied for planning a software platform for supporting SMEs in their innovation processes.},
langid = {english},
keywords = {innovation,methodology,product deployment,product planning,software engineering},
file = {/Users/guru/Zotero/storage/34UMSTP5/Brad et al_2014_Smart Deployment of Demonstrators into Successful Commercial Solutions_Procedia CIRP.pdf;/Users/guru/Zotero/storage/U7DAI4DH/S2212827114006775.html}
}
@inproceedings{Brad2014503,
type = {Conference Paper},
title = {Smart Deployment of Demonstrators into Successful Commercial Solutions},
author = {Brad, Stelian and Fulea, Mircea and Brad, Emilia and Mocan, Bogdan},
year = {2014},
series = {Procedia {{CIRP}}},
volume = {21},
pages = {503--508},
doi = {10.1016/j.procir.2014.03.137},
abstract = {Product or service concepts based on emerging technologies are usually results of research projects, be they performed by academic groups or by research departments of companies. Many times, the prototypes or demonstrators that result from such projects are supposed to evolve into commercial products or services, but \textendash{} at least in the first stage \textendash{} there is more focus on proving key features of a technology, or the effectiveness / efficiency / applicability of various concepts or algorithms. However, evolving into commercial products is many times at least as challenging as building the prototypes. In the case of software-based projects, this means changes in architecture, a lot of code rewriting and important usability improvements. This paper introduces a software-concept product design algorithm which aims to minimize the effort required in turning a demonstrator into a commercial product. This is done by generating two functionality sets: a pure demonstrator and a pure commercial one, then generating a hybrid functionality set with the corresponding architecture, and then assessing each functionality for the demonstrator and the commercial version in terms of development and improvement effort. Through iterations, in which the original functionality sets are improved, the difference between the two perspectives will be reduced until it gets below a reasonable limit in terms of effort. The paper presents a case study in which the algorithm is applied for planning a software platform for supporting SMEs in their innovation processes. \textcopyright{} 2014 Published by Elsevier B.V.},
publication_stage = {Final},
source = {Scopus}
}
@article{bruneliere2022:aidoart,
title = {{{AIDOaRt}}: {{AI-augmented Automation}} for {{DevOps}}, a Model-Based Framework for Continuous Development in {{Cyber}}\textendash{{Physical Systems}}},
shorttitle = {{{AIDOaRt}}},
author = {Bruneliere, Hugo and Muttillo, Vittoriano and Eramo, Romina and Berardinelli, Luca and G{\'o}mez, Abel and Bagnato, Alessandra and Sadovykh, Andrey and Cicchetti, Antonio},
year = {2022},
month = oct,
journal = {Microprocessors and Microsystems},
volume = {94},
pages = {104672},
issn = {0141-9331},
doi = {10.1016/j.micpro.2022.104672},
urldate = {2023-01-15},
abstract = {The advent of complex Cyber\textendash Physical Systems (CPSs) creates the need for more efficient engineering processes. Recently, DevOps promoted the idea of considering a closer continuous integration between system development (including its design) and operational deployment. Despite their use being still currently limited, Artificial Intelligence (AI) techniques are suitable candidates for improving such system engineering activities (cf. AIOps). In this context, AIDOaRT is a large European collaborative project that aims at providing AI-augmented automation capabilities to better support the modeling, coding, testing, monitoring, and continuous development of CPSs. The project proposes to combine Model Driven Engineering principles and techniques with AI-enhanced methods and tools for engineering more trustable CPSs. The resulting framework will (1) enable the dynamic observation and analysis of system data collected at both runtime and design time and (2) provide dedicated AI-augmented solutions that will then be validated in concrete industrial cases. This paper describes the main research objectives and underlying paradigms of the AIDOaRt project. It also introduces the conceptual architecture and proposed approach of the AIDOaRt overall solution. Finally, it reports on the actual project practices and discusses the current results and future plans.},
langid = {english},
keywords = {AIOps,Artificial Intelligence,Continuous development,Cyber–Physical Systems,DevOps,Model Driven Engineering,Software engineering,System engineering},
file = {/Users/guru/Zotero/storage/94A582QU/S0141933122002022.html}
}
@article{byrne2018:biologicalisation,
title = {Biologicalisation: {{Biological}} Transformation in Manufacturing},
shorttitle = {Biologicalisation},
author = {Byrne, Gerald and Dimitrov, Dimitri and Monostori, Laszlo and Teti, Roberto and {van Houten}, Fred and Wertheim, Rafi},
year = {2018},
month = may,
journal = {CIRP Journal of Manufacturing Science and Technology},
volume = {21},
pages = {1--32},
issn = {1755-5817},
doi = {10.1016/j.cirpj.2018.03.003},
urldate = {2023-01-15},
abstract = {A new emerging frontier in the evolution of the digitalisation and the 4th industrial revolution (Industry 4.0) is considered to be that of ``Biologicalisation in Manufacturing''. This has been defined by the authors to be ``The use and integration of biological and bio-inspired principles, materials, functions, structures and resources for intelligent and sustainable manufacturing technologies and systems with the aim of achieving their full potential.'' In this White Paper, detailed consideration is given to the meaning and implications of ``Biologicalisation'' from the perspective of the design, function and operation of products, manufacturing processes, manufacturing systems, supply chains and organisations. The drivers and influencing factors are also reviewed in detail and in the context of significant developments in materials science and engineering. The paper attempts to test the hypothesis of this topic as a breaking new frontier and to provide a vision for the development of manufacturing science and technology from the perspective of incorporating inspiration from biological systems. Seven recommendations are delivered aimed at policy makers, at funding agencies, at the manufacturing research community and at those industries involved in the development of next generation manufacturing technology and systems. It is concluded that it is valid to argue that Biologicalisation in Manufacturing truly represents a new and breaking frontier of digitalisation and Industry 4.0 and that the market potential is very strong. It is evident that extensive research and development is required in order to maximise on the benefits of a biological transformation.},
langid = {english},
keywords = {Bio-inspired,Bio-integrated,Bio-intelligent,Biological transformation,Biologicalisation in Manufacturing,Cyber-physical systems,Digitalisation,Industrie 4.0,Industry 4.0,International perspective,Manufacturing},
file = {/Users/guru/Zotero/storage/C6CGA3ND/Byrne et al_2018_Biologicalisation_CIRP Journal of Manufacturing Science and Technology.pdf;/Users/guru/Zotero/storage/F6PY3BXA/S1755581718300129.html}
}
@article{byrne2022:study,
title = {A {{Study}} of the {{Organisational Behaviour}} of {{Software Test Engineers}}, {{Contributing}} to the {{Digital Transformation}} of {{Banks}} in the {{Irish Financial Sector}}},
author = {Byrne, Darren and Tuite, Aisling and Organ, John},
year = {2022},
month = jan,
journal = {IFAC-PapersOnLine},
series = {21st {{IFAC Conference}} on {{Technology}}, {{Culture}} and {{International Stability TECIS}} 2022},
volume = {55},
number = {39},
pages = {259--264},
issn = {2405-8963},
doi = {10.1016/j.ifacol.2022.12.031},
urldate = {2023-01-15},
abstract = {This initial exploratory paper will endeavour to understand theoretically how Software Test Engineers (STEs) behave within the organisational framework of an Irish financial institution (IFI), in terms of the development/execution of test cases, based on variables such as experience, the nature of the project in question and the environment of the IFI. Whilst the research is currently in the initial stages, it will focus on the exploration of insider research and auto-ethnographic research methods, subsequently progressing to the delineation of how such knowledge relating to the various methods/tools used by STEs can be leveraged to improve performance and returns on investment for both STEs/IFIs respectively. The research will employ qualitative, inductive, and interpretive methods/methodologies to achieve its aim/objectives. This paper will focus on one specific research objective - the examination of individual STE behaviour within an IFI. Interviews will be conducted with participants working in a variety of roles (including STEs, Project Managers, Business Analysts, etc.) to gather stories relating to the behaviour of STEs within a major IFI. Insider research methods will be used, supported by auto-ethnographic methods of reflexivity to help ensure research validity. Once the primary data collection phase is completed, it will be analysed/interpreted through theoretical frameworks within the discipline of organisational behaviour (OB). As this is inductive research, contributions to both theory and practice will emerge as the research process develops, but the research is expected to contribute to practice by highlighting the OB of a specific team with growing prominence/importance within the organisation, as it goes through a process of change to digital-first customer interactions. Additionally, the research will contribute to the methods of insider research through transparent reflection regarding the practical/philosophical challenges of this form of ethnographic research.},
langid = {english},
keywords = {digital transformation,financial technology,fintech,Irish financial industry,organisational behaviour,software testing},
file = {/Users/guru/Zotero/storage/3EYSJK3E/S2405896322030713.html}
}
@article{cachero2023:influence,
title = {Influence of Personality and Modality on Peer Assessment Evaluation Perceptions Using {{Machine Learning}} Techniques},
author = {Cachero, Cristina and {Rico-Juan}, Juan Ram{\'o}n and Maci{\`a}, Hermenegilda},
year = {2023},
month = mar,
journal = {Expert Systems with Applications},
volume = {213},
pages = {119150},
issn = {0957-4174},
doi = {10.1016/j.eswa.2022.119150},
urldate = {2023-01-15},
abstract = {The successful instructional design of self and peer assessment in higher education poses several challenges that instructors need to be aware of. One of these is the influence of students' personalities on their intention to adopt peer assessment. This paper presents a quasi-experiment in which 85 participants, enrolled in the first year of a Computer Engineering programme, were assessed regarding their personality and their acceptance of three modalities of peer assessment (individual, pairs, in threes). Following a within-subjects design, the students applied the three modalities, in a different order, with three different activities. An analysis of the resulting 1195 observations using ML techniques shows how the Random Forest algorithm yields significantly better predictions for three out of the four adoption variables included in the study. Additionally, the application of a set of eXplainable Artificial Intelligence (XAI) techniques shows that Agreeableness is the best predictor of Usefulness and Ease of Use, while Extraversion is the best predictor of Compatibility, and Neuroticism has the greatest impact on global Intention to Use. The discussion highlights how, as happens with other innovations in educational processes, low levels of Conscientiousness are the most consistent predictor of resistance to the introduction of peer assessment processes in the classroom. Also, it stresses the value of peer assessment to augment the positive feelings of students scoring high on Neuroticism, which could lead to better performance. Finally, the low impact of the peer assessment modality on student perceptions compared to personality variables is debated.},
langid = {english},
keywords = {eXplainable Artificial Intelligence (XAI),Machine learning (ML),Peer assessment (PA),Personality,Quasi-experiment,Use behaviour},
file = {/Users/guru/Zotero/storage/DI9JCCSR/Cachero et al_2023_Influence of personality and modality on peer assessment evaluation perceptions_Expert Systems with Applications.pdf;/Users/guru/Zotero/storage/ZESBLKD5/S0957417422021686.html}
}
@inproceedings{campeanu2017:runtime,
title = {Run-Time Component Allocation in {{CPU-GPU}} Embedded Systems},
booktitle = {Proceedings of the {{Symposium}} on {{Applied Computing}}},
author = {Campeanu, Gabriel and Saadatmand, Mehrdad},
year = {2017},
month = apr,
series = {{{SAC}} '17},
pages = {1259--1265},
publisher = {{Association for Computing Machinery}},
address = {{New York, NY, USA}},
doi = {10.1145/3019612.3019785},
urldate = {2023-01-15},
abstract = {Nowadays, many modern embedded applications, such as vehicles and robots, interact with the environment and receive huge amounts of data through various sensors such as cameras and radars. The challenge of processing large amounts of data within an acceptable performance is solved by employing embedded systems that incorporate complementary attributes of CPUs and Graphics Processing Units (GPUs), i.e., sequential and parallel execution models. Component-based development (CBD) is a software engineering methodology that augments application development through reuse of software blocks known as components. In developing a CPU-GPU embedded application using CBD, allocation of components to different processing units of the platform is an important activity which can affect the overall performance of the system. In this context, there is also often the need to support and achieve run-time component allocation due to various factors and situations that can happen during system execution, such as switching off parts of the system for energy saving. In this paper, we provide a solution that dynamically allocates components using various system information such as the available resources (e.g., available GPU memory) and the software behavior (e.g., in terms of GPU memory usage). The novelty of our work is a formal allocation model that considers GPU system characteristics computed on-the-fly through software monitoring solutions. For the presentation and validation of our solution, we utilize an existing underwater robot demonstrator.},
isbn = {978-1-4503-4486-9},
keywords = {component allocation,component-based development,CPU-GPU,dynamic allocation,embedded systems,GPU,GPU monitoring,monitor},
file = {/Users/guru/Zotero/storage/MTEGLIMB/Campeanu_Saadatmand_2017_Run-time component allocation in CPU-GPU embedded systems_Proceedings of the Symposium on Applied Computing.pdf}
}
@article{Canedo20221527,
type = {Article},
title = {Creativity and Design Thinking as Facilitators in Requirements Elicitation},
author = {Canedo, Edna Dias and Calazans, Angelica Toffano Seidel and Silva, Geovana Ramos Sousa and Costa, Pedro Henrique Teixeira and De Mesquita, Rodrigo Pereira and Masson, Eloisa Toffano Seidel},
year = {2022},
journal = {International Journal of Software Engineering and Knowledge Engineering},
volume = {32},
number = {10},
pages = {1527--1558},
doi = {10.1142/S0218194022500607},
abstract = {Context: The use of Creativity and Design Thinking (C\&DT) techniques favors the generation of new ideas based on the needs of users and stakeholders, and can support software developers during the process of requirements elicitation. Objectives: In this work, we aim to identify C\&DT techniques to perform requirements elicitation proposed in the literature and in the industry and investigate the perception of software developers about using these techniques. Methods: We conducted a systematic literature review (SLR) to identify the C\&DT techniques in the literature and a regional survey with software development teams from several companies in Brazil to identify which techniques found in the literature are currently being used by organizations. The survey also investigated the level of knowledge that software developers have regarding the C\&DT techniques, and whether they agree that the use of these techniques can help to achieve a more effective process of requirements elicitation. Results: In the SLR, we identified 86 C\&DT techniques that support requirement elicitation activities. In the survey, most developers outlined that C\&DT techniques facilitate requirements elicitation and stated that they have more knowledge and usage experience with DT techniques than creativity techniques. The most used DT techniques mentioned by survey participants were: interview, brainstorming, use cases, activity analysis, user story, and rapid prototyping, whereas for creativity techniques they were: analogies, creativity workshops, focus group, questions list, clarification, none and combining ideas. Conclusions: The results showed that despite the existence of a large number of techniques in the literature, the developers' lack of knowledge about these techniques means that they are not used in the industry. However, the developers' responses showed that the use of C\&DT techniques helps to make requirements elicitation more effective. \textcopyright{} 2022 World Scientific Publishing Company.},
publication_stage = {Final},
source = {Scopus}
}
@inproceedings{carbone2020:radically,
title = {Radically Simplifying Game Engines: {{AI}} Emotions \& Game Self-Evolution},
booktitle = {2020 International Conference on Computational Science and Computational Intelligence ({{CSCI}})},
author = {Carbone, John N. and Crowder, James and Carbone, Ryan A.},
year = {2020},
month = dec,
pages = {464--472},
doi = {10.1109/CSCI51800.2020.00085},
abstract = {Today, video games are a multi-billion-dollar industry, continuously evolving through the incorporation of new technologies and innovative design. However, current video game software content creation requires extensive and often-times ambiguous planning phases for developing aesthetics, online capabilities, and gameplay mechanics. Design elements can vary significantly relative to the expertise of artists, designers, budget, and overall game engine/software features and capabilities. Game development processes are often extensively long coding sessions, usually involving a highly iterative creative process, where user requirements are rarely provided. Therefore, we propose significantly simplifying game design and development with novel Artificial Cognition Architecture real-time scalability and dynamic emotion core. Rather than utilizing more static emotion state weighting emotion engines (e.g. ExAI), we leverage significant ACA research in successful implementation of analog neural learning bots with Maslowan objective function algorithms. We also leverage AI-based Artificial Psychology software which utilizes ACA's fine grained self-evolving emotion modeling in humanistic avatar patients for Psychologist training. An ACA common cognitive core provides the gaming industry with wider applications across video game genres. A modular, scalable, and cognitive emotion game architecture implements Non-Playable Character (NPC) learning and self-evolution. ACA models NPC's with fine grained emotions, providing interactive dynamic personality traits for a more realistic game environment and enables NPC self-evolution under the influence of both other NPC's and players. Furthermore, we explore current video game design engine architecture (e.g. Unity, Unreal Engine) and propose an ACA integration approach. We apply artificial cognition and emotion intelligence modeling to engender video games with more distinct, realistic consumer gaming experiences, while simultaneously minimizing software gaming development efforts and costs.},
keywords = {Artificial Cognition,Artificial Intelligence,Artificial Psychology,Autonomy,Computational modeling,Computer architecture,Emotion Modeling,Games,Industries,Psychology,Self-Evolving,Software,Software Development,Training,Video Games},
file = {/Users/guru/Zotero/storage/W3F2T9C9/Carbone et al. - 2020 - Radically simplifying game engines AI emotions & .pdf}
}
@article{caron2016internet,
title = {The {{Internet}} of {{Things}} ({{IoT}}) and Its Impact on Individual Privacy: {{An Australian}} Perspective},
author = {Caron, Xavier and Bosua, Rachelle and Maynard, Sean B and Ahmad, Atif},
year = {2016},
journal = {Computer Law \& Security Review},
volume = {32},
number = {1},
pages = {4--15},
publisher = {{Elsevier}}
}
@article{carvalho2020:computation,
title = {Computation Offloading in {{Edge Computing}} Environments Using {{Artificial Intelligence}} Techniques},
author = {Carvalho, Gon{\c c}alo and Cabral, Bruno and Pereira, Vasco and Bernardino, Jorge},
year = {2020},
month = oct,
journal = {Engineering Applications of Artificial Intelligence},
volume = {95},
pages = {103840},
issn = {0952-1976},
doi = {10.1016/j.engappai.2020.103840},
urldate = {2023-01-15},
abstract = {Edge Computing (EC) is a recent architectural paradigm that brings computation close to end-users with the aim of reducing latency and bandwidth bottlenecks, which 5G technologies are committed to further reduce, while also achieving higher reliability. EC enables computation offloading from end devices to edge nodes. Deciding whether a task should be offloaded, or not, is not trivial. Moreover, deciding when and where to offload a task makes things even harder and making inadequate or off-time decisions can undermine the EC approach. Recently, Artificial Intelligence (AI) techniques, such as Machine Learning (ML), have been used to help EC systems cope with this problem. AI promises accurate decisions, higher adaptability and portability, thus diminishing the cost of decision-making and the probability of error. In this work, we perform a literature review on computation offloading in EC systems with and without AI techniques. We analyze several AI techniques, especially ML-based, that display promising results, overcoming the shortcomings of current approaches for computation offloading coordination. We sorted the ML algorithms into classes for better analysis and provide an in-depth analysis on the use of AI for offloading, in particular, in the use case of offloading in Vehicular Edge Computing Networks, a technology that has gained relevance in recent years, enabling a vast number of solutions for computation and data offloading. We also discuss the main advantages and limitations of offloading, with and without the use of AI techniques.},
langid = {english},
keywords = {Artificial Intelligence,Computation offloading,Edge Computing,Machine Learning},
file = {/Users/guru/Zotero/storage/T5G8VER9/S0952197620302050.html}
}
@article{casillo2022:detecting,
title = {Detecting Privacy Requirements from {{User Stories}} with {{NLP}} Transfer Learning Models},
author = {Casillo, Francesco and Deufemia, Vincenzo and Gravino, Carmine},
year = {2022},
month = jun,
journal = {Information and Software Technology},
volume = {146},
pages = {106853},
issn = {0950-5849},
doi = {10.1016/j.infsof.2022.106853},
urldate = {2023-01-15},
abstract = {Context: To provide privacy-aware software systems, it is crucial to consider privacy from the very beginning of the development. However, developers do not have the expertise and the knowledge required to embed the legal and social requirements for data protection into software systems. Objective: We present an approach to decrease privacy risks during agile software development by automatically detecting privacy-related information in the context of user story requirements, a prominent notation in agile Requirement Engineering (RE). Methods: The proposed approach combines Natural Language Processing (NLP) and linguistic resources with deep learning algorithms to identify privacy aspects into User Stories. NLP technologies are used to extract information regarding the semantic and syntactic structure of the text. This information is then processed by a pre-trained convolutional neural network, which paved the way for the implementation of a Transfer Learning technique. We evaluate the proposed approach by performing an empirical study with a dataset of 1680 user stories. Results: The experimental results show that deep learning algorithms allow to obtain better predictions than those achieved with conventional (shallow) machine learning methods. Moreover, the application of Transfer Learning allows to considerably improve the accuracy of the predictions, ca. 10\%. Conclusions: Our study contributes to encourage software engineering researchers in considering the opportunities to automate privacy detection in the early phase of design, by also exploiting transfer learning models.},
langid = {english},
keywords = {Deep learning,Natural Language Processing,Transfer Learning,User Stories},
file = {/Users/guru/Zotero/storage/TGJKW5MA/S0950584922000246.html}
}
@article{cassoli2022:frameworks,
title = {Frameworks for Data-Driven Quality Management in Cyber-Physical Systems for Manufacturing: {{A}} Systematic Review},
shorttitle = {Frameworks for Data-Driven Quality Management in Cyber-Physical Systems for Manufacturing},
author = {Cassoli, Beatriz Bretones and Jourdan, Nicolas and Nguyen, Phu H. and Sen, Sagar and {Garcia-Ceja}, Enrique and Metternich, Joachim},
year = {2022},
month = jan,
journal = {Procedia CIRP},
series = {15th {{CIRP Conference}} on {{Intelligent Computation}} in {{Manufacturing Engineering}}, 14-16 {{July}} 2021},
volume = {112},
pages = {567--572},
issn = {2212-8271},
doi = {10.1016/j.procir.2022.09.062},
urldate = {2023-01-15},
abstract = {Recent advances in the manufacturing industry have enabled the deployment of Cyber-Physical Systems (CPS) at scale. By utilizing advanced analytics, data from production can be analyzed and used to monitor and improve the process and product quality. Many frameworks for implementing CPS have been developed to structure the relationship between the digital and the physical worlds. However, there is no systematic review of the existing frameworks related to quality management in manufacturing CPS. Thus, our study aims at determining and comparing the existing frameworks. The systematic review yielded 38 frameworks analyzed regarding their characteristics, use of data science and Machine Learning (ML), and shortcomings and open research issues. The identified issues mainly relate to limitations in cross-industry/cross-process applicability, the use of ML, big data handling, and data security.},
langid = {english},
keywords = {Artificial Intelligence (AI),Cyber-Physical Systems (CPS),Framework,Quality Management,Systematic Literature Review}
}
@article{cerdeiral2019software,
title = {Software Project Management in High Maturity: {{A}} Systematic Literature Mapping},
author = {Cerdeiral, Cristina T and Santos, Gleison},
year = {2019},
journal = {Journal of Systems and Software},
volume = {148},
pages = {56--87},
publisher = {{Elsevier}}
}
@inproceedings{Chagas2020101,
type = {Conference Paper},
title = {On the Reuse of Knowledge to Develop Intelligent Software Engineering Solutions},
author = {Chagas, Jos{\'e} Ferdinandy Silva and {de Sousa Neto}, Ademar Fran{\c c}a and Almeida, Hyggo and Silva, Luiz Antonio Pereira and Albuquerque, Danyllo and Perkusich, Mirko and Valadares, Dalton C{\'e}zane Gomes and Perkusich, Angelo},
year = {2020},
series = {Proceedings of the {{International Conference}} on {{Software Engineering}} and {{Knowledge Engineering}}, {{SEKE}}},
volume = {PartF162440},
pages = {101--106},
doi = {10.18293/SEKE2020-157},
abstract = {Intelligent Software Engineering (ISE) is currently a hot topic in research. Besides being a promising field, it brings many challenges. Therefore, there is a need for guidelines to help researchers to build an ISE solution. The goal of this study is to identify patterns in developing ISE solutions. For this purpose, we analyzed 42 studies, using a thematic analysis approach, to understand how they reused knowledge and applied it to solve an SE task. As a result, we developed a thematic network composed of the main concepts related to knowledge reuse for ISE. Further, we identified that researchers use external and internal knowledge sources, and mostly rely on structured data to develop ISE solutions. Despite this, there are alternatives such as eliciting data from humans and literature to identify metrics or build knowledge-based systems. Overall, we concluded that there are many research opportunities to guide the construction of ISE solutions. \textcopyright{} 2020 Knowledge Systems Institute Graduate School. All rights reserved.},
publication_stage = {Final},
source = {Scopus},
keywords = {primary}
}
@inproceedings{Chanda201953,
type = {Conference Paper},
title = {{{AI}} Based Data Architecture Impact Analysis},
author = {Chanda, Debasis},
year = {2019},
series = {{{EPiC Series}} in {{Computing}}},
volume = {63},
pages = {53--62},
doi = {10.29007/fkhl},
abstract = {Enterprises today are technology driven and comprise a plethora of applications that may be categorized based on the technology that they are developed and deployed on. For enterprises that have existed across years and across multiple business cycles, the technologies may be classified as legacy, mature or emerging. The challenge lies in interoperability within and without the organization, especially with respect to the business objects that are required across business functions, to realize the capabilities of the organization. This is also true for scenarios of M\&As (Mergers \& Acquisitions) and during creation of JVs (Joint Ventures). Enterprise Architecture (EA) defines the Business-Technology alignment in organizations, and is an established methodology for business transformation and establishing enterprise maturity in the keenly competitive business world. Business objects are defined as Data Architecture artifacts within the ambit of EA. The challenges to business object interoperability arise due to the incompatibility of technologies used by the applications. This leads to the well explored n*(n-1) scenario, where n is the number of application interfaces. This has serious implications towards business health of the organization, and risk to the BAU (Business As Usual) of the organization. This is because in a complex mesh like the n*(n-1) scenario, it becomes practically impossible to identify the impact of changes to business capabilities in an inconspicuous attribute of a business object in an application domain. Thus the impact analysis of business objects / data as defined by traditional description is a challenge to business sustainability of organizations. These challenges in data architecture impact analysis may be mitigated by the AI (Artificial Intelligence) paradigm, by taking recourse to the very powerful features of AI, by defining predicate calculus based knowledge bases. In our paper we consider the Banking domain for carrying out our discussions. \textcopyright{} 2019, EasyChair. All rights reserved.},
publication_stage = {Final},
source = {Scopus}
}
@article{chang2016:review,
title = {A Review on Exception Analysis},
author = {Chang, Byeong-Mo and Choi, Kwanghoon},
year = {2016},
month = sep,
journal = {Information and Software Technology},
volume = {77},
pages = {1--16},
issn = {0950-5849},
doi = {10.1016/j.infsof.2016.05.003},
urldate = {2023-01-15},
abstract = {Context: Exception handling has become popular in most major programming languages, including Ada, C++, Java, and ML. Since exception handling was introduced in programming languages, there have been various kinds of exception analyses, which analyze exceptional behavior of programs statically or dynamically. Exception analyses have also been applied to various software engineering tasks such as testing, slicing, verification and visualization. Objective: This paper aims at providing a comprehensive view of studies on exception analysis. We conducted a review on exception analysis to identify and classify the studies. Method: We referred to the literature review method, and selected a comprehensive set of 87 papers on exception analysis from 515 papers published in journals and conference proceedings. The categorization and classification were done according to the research questions regarding when they analyze, what they analyze, how to analyze, and applications of exception analysis. Results: We first identify three categories of static exception analysis and two categories of dynamic exception analysis together with the main applications of the exception analyses. We also discuss the main concepts, research methods used and major contributions of the studies on exception analysis. Conclusion: We have provided a comprehensive review of exception analysis. To the best of our knowledge, this is the first comprehensive review on exception analysis. As further work, it would be interesting to see how the existing exception analysis techniques reviewed in this paper can be applied to other programming languages with exception handling mechanisms, such as C\#, Scala, and Eiffel, which have been rarely explored.},
langid = {english},
keywords = {Debugging,Dynamic analysis,Exception analysis,Exception flow,Static analysis,Testing}
}
@article{Chang2018,
type = {Article},
title = {Situation Analytics \textemdash{} at the Dawn of a New Software Engineering Paradigm},
author = {Chang, Carl K.},
year = {2018},
journal = {Science China Information Sciences},
volume = {61},
number = {5},
doi = {10.1007/s11432-017-9372-7},
abstract = {In this paper, I first review the seminal work by Thomas Kuhn \textemdash{} The Structure of Scientific Revolutions \textemdash{} and elaborate my view on paradigm shifts in software engineering research and practice as it turns 50 years old in 2018. I then examine major undertakings of the computing profession since early days of modern computing, especially those done by the software engineering community as a whole. I also enumerate anomalies and crises that occurred at various stages, and the attempts to provide solutions by the software engineering professionals in the past five decades. After providing such a background, I direct readers' attention toward emerging anomalies in software engineering, at a severity level that is causing another software engineering crisis, and suggest a set of criteria for feasible solutions. The main theme of this paper is to advocate that situation analytics, equipped with necessary definitions of essential concepts including situation and intention as parts of a new computational framework, can serve as the foundation for a new software engineering paradigm named the Situation-Centric Paradigm. In this framework, situation is considered a new abstraction for computing and is clearly differentiated from the widely accepted existing abstractions, namely function and object. I argue that the software engineering professionals will inevitably move into this new paradigm, willingly or unwillingly, to empower Human-Embedded Computing (HEC) and End-User Embedded Computing (EUEC), much more than what they have done with traditional human-centered or user-centric computing altogether. In the end, I speculate that an ultimate agile method may be on the rise, and challenge readers to contemplate ``what if'' hundreds of thousands of ``end-user developers'' emerge into the scene where the boundaries between end users and developers become much more blurred. \textcopyright{} 2018, Science China Press and Springer-Verlag GmbH Germany, part of Springer Nature.},
publication_stage = {Final},
source = {Scopus}
}
@incollection{chang2020:chapter,
title = {Chapter 5 - {{Machine}} and {{Deep Learning}}},
booktitle = {Intelligence-{{Based Medicine}}},
author = {Chang, Anthony C.},
editor = {Chang, Anthony C.},
year = {2020},
month = jan,
pages = {67--140},
publisher = {{Academic Press}},
doi = {10.1016/B978-0-12-823337-5.00005-6},
urldate = {2023-01-15},
abstract = {Machine learning along with data mining comprise sub-disciplines under data science. There are several schools of machine learning, including symbolists, connectionists, evolutionaries, Bayesians, and analogizers. Machine learning with its sometimes tedious workflow differs significantly from conventional programming. Classical machine learning consists of supervised (classification and regression) and unsupervised (clustering and generalization) learning but also semi-supervised and ensemble learning. Deep learning consists of a range of methods including convolutional neural networks, recurrent neural networks, generative adversarial networks, and their derivatives. Deep reinforcement learning (such as deep Q network, or DQN) is becoming a valuable deep learning tool in biomedicine. Evaluation of these models includes methods such as receiver operating characteristic, precision-recall curve, and the F-1 measure in the confusion matrix. Finally, issues such as explainability, bias and variance, fitting, curse of dimensionality, and correlation vs causation are discussed.},
isbn = {978-0-12-823337-5},
langid = {english},
keywords = {bias and variance,classification,cluster analysis,convolutional neural network,deep learning,deep reinforcement learning,fitting,machine learning,precision recall curve,receiver operating characteristic,recurrent neural network,regression,supervised learning,unsupervised learning}
}
@article{Chelouati2023,
type = {Article},
title = {Graphical Safety Assurance Case Using {{Goal Structuring Notation}} ({{GSN}}) \textemdash{} Challenges, Opportunities and a Framework for Autonomous Trains},
author = {Chelouati, Mohammed and Boussif, Abderraouf and Beugin, Julie and El Koursi, El-Miloudi},
year = {2023},
journal = {Reliability Engineering and System Safety},
volume = {230},
doi = {10.1016/j.ress.2022.108933},
abstract = {The development of fully autonomous vehicles is an ambition that took seed in the automotive industry a few years ago and is now growing in the railways considering their benefits. The main objective of an autonomous train is to perform its operations and assure its mission with an acceptable safety level in all possible operational conditions. Such an objective needs to be supported by a safety demonstration. In order to authorize the operations of railway systems, they must be proven safe. This requires a technical and operational safety assessment, and also a safety assurance process during the system's whole life-cycle. The goal of such activities is to ensure that designed systems comply with railway safety standards and regulations. Both safety arguments and evidence are required to demonstrate that this compliance is achieved. These sets of evidence are documented in a so-called safety case. Recently, graphical safety cases, such as Goal Structuring Notation (GSN)-based safety cases, have become an interesting alternative to narrative reports and plain texts. The graphical structure and visual properties improve the presentation and comprehension of the safety arguments. In this paper, we first review the use of the GSN for building graphical safety cases for different transportation systems, with a focus on the railway domain. Then, we discuss the opportunities and challenges of considering such an approach in railway and we propose a high-level framework for building the GSN-based safety assurance case for autonomous trains. \textcopyright{} 2022 Elsevier Ltd},
publication_stage = {Final},
source = {Scopus}
}
@article{chemingui2019:product,
title = {Product {{Line Configuration Meets Process Mining}}},
author = {Chemingui, Houssem and Gam, Ines and Mazo, Ra{\'u}l and Salinesi, Camille and Ghezala, Henda Ben},
year = {2019},
month = jan,
journal = {Procedia Computer Science},
series = {{{CENTERIS}} 2019 - {{International Conference}} on {{ENTERprise Information Systems}} / {{ProjMAN}} 2019 - {{International Conference}} on {{Project MANagement}} / {{HCist}} 2019 - {{International Conference}} on {{Health}} and {{Social Care Information Systems}} and {{Technologies}}, {{CENTERIS}}/{{ProjMAN}}/{{HCist}} 2019},
volume = {164},
pages = {199--210},
issn = {1877-0509},
doi = {10.1016/j.procs.2019.12.173},
urldate = {2023-01-15},
abstract = {Product line engineering is a new production paradigm that provides organizations with a competitive edge by improving productivity and decreasing costs. The purpose of this new production paradigm is no longer to develop a single product but to develop a product family and to generate the products of the line through configuration processes. However, the potential benefits of product line engineering can be missed when dealing with large product lines because the configuration processes become error-prone tasks. Consequently, guiding stakeholders during such complex configuration processes and recommending the best configuration alternatives until reaching a satisfying experience becomes a challenge. This paper focuses on enhancing product line configuration processes through process mining techniques. Therefore, users' actions from previous product line configurations are logged, mined and analyzed. We conducted preliminary research to motivate the advantages of process mining in product line configuration and to explore what process mining can bring to configuration processes and how to use it to enhance them. Thus, guidance questions are sketched in order to position process mining as a tool for solving configuration difficulties. Furthermore, we propose a reference architecture that considers process mining for configuring product lines.},
langid = {english},
keywords = {configuration difficulties,configuration process,enhancing,process mining,Product line engineering}
}