%gecco2024.awk Revision: 1.56 , `GECCOcomp' Tutorials.new 12 Aug 2024 %gecco2024.awk Revision: 1.56 , `GECCOcomp' Accepted-Competition-Entries.new 12 Aug 2024 %gecco2024.awk Revision: 1.56 , `GECCOcomp' Accepted-Hot-off-the-Press-Papers.new 12 Aug 2024 %gecco2024.awk Revision: 1.56 , `GECCOcomp' Accepted-Late-breaking-Abstracts.new 12 Aug 2024 %gecco2024.awk Revision: 1.56 , `GECCOcomp' Accepted-Posters.new 12 Aug 2024 %gecco2024.awk Revision: 1.56 , `GECCOcomp' Accepted-Workshop-Papers.new 12 Aug 2024 %gecco2024.awk Revision: 1.56 , `GECCOcomp' gecco2024comp_acm.bib.new 12 Aug 2024 @inproceedings(apeldoorn:2024:GECCOcomp, author = {Daan Apeldoorn and Alexander Dockhorn and Torsten Panholzer}, title = {{AbstractSwarm} {Multi-Agent} Logistics Competition: {Multi-Agent} Collaboration for Improving A Priori Unknown Logistics Scenarios}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Hemant Kumar Singh and Daan Apeldoorn and Alexander Dockhorn and Torsten Panholzer and Diederick Vermetten and Konstantin Dietrich and Pascal Kerschke and Carola Doerr and Maciej Komosinski and Konrad Miazga and Agnieszka Mensfelt and Ali Ahrari and Jonathan Fieldsend and Mike Preuss and Xiaodong Li and Michael G. Epitropakis and Johannes Karder and Stefan Wagner and Bernhard Werth and Andreas Beham and Joao Soares and Fernando Lezama and Josex Almeida and Wenlei Bai and Thomas Baeck and Zita Vale and Aneta Neumann and Saba Sadeghi Ahouei and Jacob {de Nobel} and Giorgia Nadizar and Luigi Rovito and Dennis G. Wilson and Eric Medvet and Rong Qu and Nelishia Pillay and Weiyao Meng and Amir H Gandomi and Kalyanmoy Deb and Danial Yazdani and Rohit Salgotra and Mohammad Nabi Omidvar and Max Bannach and Emmanuel Blazquez and Dario Izzo and Francois Clement and Luis Paquete and Kathrin Klamroth and Adriano Rodrigues {Figueiredo Torres} and Markus Wagner}, pages = {1--2}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {artificial intelligence, competition, logistics, multi-agent simulation, optimization}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664053}, size = {2 pages}, abstract = {This competition abstract provides a brief introduction to the AbstractSwarm Multi-Agent Logistics Competition, which runs in 2024 in the fourth year in series at the Genetic and Evolutionary Computation Conference (GECCO) and has been part of the IEEE World Congress on Computational Intelligence from 2021 to 2022. 
In this work, we also briefly introduce the AbstractSwarm multi-agent modeling and simulation system to provide a common starting point for researchers interested in the competition's rules and its challenges.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(shu:2024:GECCOcomp, author = {Xiang Shu and Yiyi Zhu and Renji Zhang and Xiang Xia and Bingdong Li and Hong Qian}, title = {Automated Design Competition Technical Report: Cascaded Structure and Parameter Optimization Based on Prior Knowledge}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Hemant Kumar Singh and Daan Apeldoorn and Alexander Dockhorn and Torsten Panholzer and Diederick Vermetten and Konstantin Dietrich and Pascal Kerschke and Carola Doerr and Maciej Komosinski and Konrad Miazga and Agnieszka Mensfelt and Ali Ahrari and Jonathan Fieldsend and Mike Preuss and Xiaodong Li and Michael G. Epitropakis and Johannes Karder and Stefan Wagner and Bernhard Werth and Andreas Beham and Joao Soares and Fernando Lezama and Josex Almeida and Wenlei Bai and Thomas Baeck and Zita Vale and Aneta Neumann and Saba Sadeghi Ahouei and Jacob {de Nobel} and Giorgia Nadizar and Luigi Rovito and Dennis G. Wilson and Eric Medvet and Rong Qu and Nelishia Pillay and Weiyao Meng and Amir H Gandomi and Kalyanmoy Deb and Danial Yazdani and Rohit Salgotra and Mohammad Nabi Omidvar and Max Bannach and Emmanuel Blazquez and Dario Izzo and Francois Clement and Luis Paquete and Kathrin Klamroth and Adriano Rodrigues {Figueiredo Torres} and Markus Wagner}, pages = {3--4}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {cascaded optimization, evolutionary algorithms, framsticks}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664054}, size = {2 pages}, abstract = {The Automated Design Competition in GECCO'24 aims to find intelligent 3D agents that perform better in specific environments. To address this problem, this technical report proposes a Cascaded Structure and Parameter Optimization (CaSPO) framework. After constructing an initial population by using prior knowledge, CaSPO optimizes the mechanical structure, control system, and parameters sequentially to find good-performance agents. Experiments in different tasks and environments verify the effectiveness of the proposed CaSPO framework.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(ahrari:2024:GECCOcomp, author = {Ali Ahrari and Jonathan Fieldsend and Mike Preuss and Xiaodong Li and Michael Epitropakis}, title = {Experimental Setup for {GECCO} 2024 Competition on Benchmarking Niching Methods for Multimodal Optimization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Hemant Kumar Singh and Daan Apeldoorn and Alexander Dockhorn and Torsten Panholzer and Diederick Vermetten and Konstantin Dietrich and Pascal Kerschke and Carola Doerr and Maciej Komosinski and Konrad Miazga and Agnieszka Mensfelt and Ali Ahrari and Jonathan Fieldsend and Mike Preuss and Xiaodong Li and Michael G. 
Epitropakis and Johannes Karder and Stefan Wagner and Bernhard Werth and Andreas Beham and Joao Soares and Fernando Lezama and Josex Almeida and Wenlei Bai and Thomas Baeck and Zita Vale and Aneta Neumann and Saba Sadeghi Ahouei and Jacob {de Nobel} and Giorgia Nadizar and Luigi Rovito and Dennis G. Wilson and Eric Medvet and Rong Qu and Nelishia Pillay and Weiyao Meng and Amir H Gandomi and Kalyanmoy Deb and Danial Yazdani and Rohit Salgotra and Mohammad Nabi Omidvar and Max Bannach and Emmanuel Blazquez and Dario Izzo and Francois Clement and Luis Paquete and Kathrin Klamroth and Adriano Rodrigues {Figueiredo Torres} and Markus Wagner}, pages = {5--6}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {continuous multimodal optimization, niching, benchmarking, multiple global optima, composite function, evaluation}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664055}, size = {2 pages}, abstract = {This work presents the experimental setup for the GECCO 2024 competition on benchmarking niching methods for multimodal optimization. It employs 16 recently developed scalable composite problems to evaluate different aspects of multimodal optimization methods. These test problems are available in Python and MATLAB.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(karder:2024:GECCOcomp, author = {Johannes Karder and Sebastian Leitner and Bernhard Werth and Stefan Wagner}, title = {{DynStack} Competition - Dynamic Stacking Optimization in Uncertain Environments}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Hemant Kumar Singh and Daan Apeldoorn and Alexander Dockhorn and Torsten Panholzer and Diederick Vermetten and Konstantin Dietrich and Pascal Kerschke and Carola Doerr and Maciej Komosinski and Konrad Miazga and Agnieszka Mensfelt and Ali Ahrari and Jonathan Fieldsend and Mike Preuss and Xiaodong Li and Michael G. Epitropakis and Johannes Karder and Stefan Wagner and Bernhard Werth and Andreas Beham and Joao Soares and Fernando Lezama and Josex Almeida and Wenlei Bai and Thomas Baeck and Zita Vale and Aneta Neumann and Saba Sadeghi Ahouei and Jacob {de Nobel} and Giorgia Nadizar and Luigi Rovito and Dennis G. Wilson and Eric Medvet and Rong Qu and Nelishia Pillay and Weiyao Meng and Amir H Gandomi and Kalyanmoy Deb and Danial Yazdani and Rohit Salgotra and Mohammad Nabi Omidvar and Max Bannach and Emmanuel Blazquez and Dario Izzo and Francois Clement and Luis Paquete and Kathrin Klamroth and Adriano Rodrigues {Figueiredo Torres} and Markus Wagner}, pages = {7--8}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {DynStack, competition, optimization, dynamic, stacking, uncertainty}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664047}, size = {2 pages}, abstract = {This 2-page extended abstract gives a short overview of the Dyn-Stack competition, which has been hosted at the Genetic and Evolutionary Computation Conference (GECCO) since 2020. 
The challenge is to generate optimized solutions for one of three different dynamic optimization scenarios in the domain of warehouse operations. These scenarios are simulated and each simulation model, executed in real-time, publishes its world state (i.e. all available information, e.g. existing transport orders and item stock) into message queues regularly. External optimizers can attach to these queues and send back respective optimization solutions in the form of crane schedules, which are then executed by simulated crane agents in the simulated systems. Dynamic changes within the simulations necessitate updates of crane schedules on a regular basis.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(qi:2024:GECCOcomp, author = {Rui Qi and Ya-Hui Jia}, title = {Key Strategies for Optimal {PV} System Allocation}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Hemant Kumar Singh and Daan Apeldoorn and Alexander Dockhorn and Torsten Panholzer and Diederick Vermetten and Konstantin Dietrich and Pascal Kerschke and Carola Doerr and Maciej Komosinski and Konrad Miazga and Agnieszka Mensfelt and Ali Ahrari and Jonathan Fieldsend and Mike Preuss and Xiaodong Li and Michael G. Epitropakis and Johannes Karder and Stefan Wagner and Bernhard Werth and Andreas Beham and Joao Soares and Fernando Lezama and Josex Almeida and Wenlei Bai and Thomas Baeck and Zita Vale and Aneta Neumann and Saba Sadeghi Ahouei and Jacob {de Nobel} and Giorgia Nadizar and Luigi Rovito and Dennis G. Wilson and Eric Medvet and Rong Qu and Nelishia Pillay and Weiyao Meng and Amir H Gandomi and Kalyanmoy Deb and Danial Yazdani and Rohit Salgotra and Mohammad Nabi Omidvar and Max Bannach and Emmanuel Blazquez and Dario Izzo and Francois Clement and Luis Paquete and Kathrin Klamroth and Adriano Rodrigues {Figueiredo Torres} and Markus Wagner}, pages = {9--10}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {distribution networks, evolutionary computation, heuristic, local search, solar photovoltaic}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664045}, size = {2 pages}, abstract = {The optimal photovoltaic (PV) allocation problem is extremely challenging due to the nonlinearity, large solution space, inherent uncertainties, and system constraints of real-world power systems. Considering the complexity of this problem, two problem-specific knowledge is extracted from this problem. Then, guided by the property, a refined bound strategy is designed to narrow down the solution space. Next, a knowledge-based local intensification strategy is proposed to improve the performance further. Most importantly, the algorithm's stability can be improved by two strategies in the meta-heuristic algorithm. Finally, experimental results demonstrate the effectiveness of the proposed strategy, and several conclusions are extracted from the comparison experiments.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(nadizar:2024:GECCOcomp, author = {Giorgia Nadizar and Luigi Rovito and Dennis G. 
Wilson and Eric Medvet}, title = {Interpretable Control Competition}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Hemant Kumar Singh and Daan Apeldoorn and Alexander Dockhorn and Torsten Panholzer and Diederick Vermetten and Konstantin Dietrich and Pascal Kerschke and Carola Doerr and Maciej Komosinski and Konrad Miazga and Agnieszka Mensfelt and Ali Ahrari and Jonathan Fieldsend and Mike Preuss and Xiaodong Li and Michael G. Epitropakis and Johannes Karder and Stefan Wagner and Bernhard Werth and Andreas Beham and Joao Soares and Fernando Lezama and Josex Almeida and Wenlei Bai and Thomas Baeck and Zita Vale and Aneta Neumann and Saba Sadeghi Ahouei and Jacob {de Nobel} and Giorgia Nadizar and Luigi Rovito and Dennis G. Wilson and Eric Medvet and Rong Qu and Nelishia Pillay and Weiyao Meng and Amir H Gandomi and Kalyanmoy Deb and Danial Yazdani and Rohit Salgotra and Mohammad Nabi Omidvar and Max Bannach and Emmanuel Blazquez and Dario Izzo and Francois Clement and Luis Paquete and Kathrin Klamroth and Adriano Rodrigues {Figueiredo Torres} and Markus Wagner}, pages = {11--12}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {interpretable policy, discrete control, continuous control}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664051}, size = {2 pages}, abstract = {Safety-critical applications rely on control systems-managed components, which not only have demanding requirements in terms of accuracy, but also require transparency and interpretability, to ensure trustworthiness. However, interpretability is often overlooked in favor of performance as (i) people perceive it as secondary, and (ii) there are no objective methods for measuring it. To address these shortcomings and raise awareness about the importance of interpretability in control systems, we propose the Interpretable Control Competition at GECCO 2024. We invite participants to solve two control problems, a discrete and a continuous one, with interpretable policies. We assess the policies in terms of both performance and interpretability, where the latter is evaluated by a pool of domain experts. Our overarching goal is to collect the contributions in a report, which should provide a guideline for practitioners in the field, gathering an array of state-of-the-art methods for interpretable control, together with suitable metrics for assessing the interpretability of a policy.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(meng:2024:GECCOcomp, author = {Weiyao Meng and Rong Qu and Nelishia Pillay}, title = {Machine Learning for Evolutionary Computation - the Vehicle Routing Problems Competition}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Hemant Kumar Singh and Daan Apeldoorn and Alexander Dockhorn and Torsten Panholzer and Diederick Vermetten and Konstantin Dietrich and Pascal Kerschke and Carola Doerr and Maciej Komosinski and Konrad Miazga and Agnieszka Mensfelt and Ali Ahrari and Jonathan Fieldsend and Mike Preuss and Xiaodong Li and Michael G. 
Epitropakis and Johannes Karder and Stefan Wagner and Bernhard Werth and Andreas Beham and Joao Soares and Fernando Lezama and Josex Almeida and Wenlei Bai and Thomas Baeck and Zita Vale and Aneta Neumann and Saba Sadeghi Ahouei and Jacob {de Nobel} and Giorgia Nadizar and Luigi Rovito and Dennis G. Wilson and Eric Medvet and Rong Qu and Nelishia Pillay and Weiyao Meng and Amir H Gandomi and Kalyanmoy Deb and Danial Yazdani and Rohit Salgotra and Mohammad Nabi Omidvar and Max Bannach and Emmanuel Blazquez and Dario Izzo and Francois Clement and Luis Paquete and Kathrin Klamroth and Adriano Rodrigues {Figueiredo Torres} and Markus Wagner}, pages = {13--14}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {machine learning, evolutionary computation, meta-heuristics, vehicle routing}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664046}, size = {2 pages}, abstract = {The Competition of Machine Learning for Evolutionary Computation for Solving Vehicle Routing Problems (ML4VRP) seeks to bring together machine learning and evolutionary computation communities to propose innovative techniques for vehicle routing problems (VRPs), aiming to advance machine learning-assisted evolutionary computation that works well across different instances of the VRPs. This paper overviews the key information of the competition.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(irtiza:2024:GECCOcomp, author = {Muhammad Irtiza and Rudri Kalaria and A. S. M. Kayes}, title = {A Reinforcement Learning-assisted Evolutionary Computing Approach to Capacitated Vehicle Routing with Time Windows}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Hemant Kumar Singh and Daan Apeldoorn and Alexander Dockhorn and Torsten Panholzer and Diederick Vermetten and Konstantin Dietrich and Pascal Kerschke and Carola Doerr and Maciej Komosinski and Konrad Miazga and Agnieszka Mensfelt and Ali Ahrari and Jonathan Fieldsend and Mike Preuss and Xiaodong Li and Michael G. Epitropakis and Johannes Karder and Stefan Wagner and Bernhard Werth and Andreas Beham and Joao Soares and Fernando Lezama and Josex Almeida and Wenlei Bai and Thomas Baeck and Zita Vale and Aneta Neumann and Saba Sadeghi Ahouei and Jacob {de Nobel} and Giorgia Nadizar and Luigi Rovito and Dennis G. 
Wilson and Eric Medvet and Rong Qu and Nelishia Pillay and Weiyao Meng and Amir H Gandomi and Kalyanmoy Deb and Danial Yazdani and Rohit Salgotra and Mohammad Nabi Omidvar and Max Bannach and Emmanuel Blazquez and Dario Izzo and Francois Clement and Luis Paquete and Kathrin Klamroth and Adriano Rodrigues {Figueiredo Torres} and Markus Wagner}, pages = {15--16}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {vehicle routing, capacitated vehicle routing problem with time windows, evolutionary computing, reinforcement learning, reinforcement learning-assisted evolutionary computing}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664049}, size = {2 pages}, abstract = {This study proposes an innovative approach to address the Capacitated Vehicle Routing Problem with Time Windows (CVRPTW) by integrating Reinforcement Learning (RL) into Evolutionary Algorithms (EAs), forming the Reinforcement Learning-assisted EA (RL-EA). While traditional EAs struggle with scalability and convergence speed, RL offers promise in dynamic decision-making. By combining the strengths of both paradigms, RL-EAs aim to enhance the state-of-the-art solutions for CVRPTW. The proposed approach seeks to optimise fleet routes while adhering to capacity and time constraints, crucial for logistics and transportation sectors. This study contributes to the advancement of optimisation techniques in complex transport scenarios, offering potential improvements in efficiency and cost-effectiveness.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(hieu:2024:GECCOcomp, author = {Ha Minh Hieu and Hung Phan and Duc Chinh Tran and Duc Cuong Van and Dao Van Tung and Huynh Thi Thanh Binh}, title = {Alimentation Deep Multiple Optimal Ant Colony Optimization to solve Vehicle Routing Problem with Time Windows}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Hemant Kumar Singh and Daan Apeldoorn and Alexander Dockhorn and Torsten Panholzer and Diederick Vermetten and Konstantin Dietrich and Pascal Kerschke and Carola Doerr and Maciej Komosinski and Konrad Miazga and Agnieszka Mensfelt and Ali Ahrari and Jonathan Fieldsend and Mike Preuss and Xiaodong Li and Michael G. Epitropakis and Johannes Karder and Stefan Wagner and Bernhard Werth and Andreas Beham and Joao Soares and Fernando Lezama and Josex Almeida and Wenlei Bai and Thomas Baeck and Zita Vale and Aneta Neumann and Saba Sadeghi Ahouei and Jacob {de Nobel} and Giorgia Nadizar and Luigi Rovito and Dennis G. 
Wilson and Eric Medvet and Rong Qu and Nelishia Pillay and Weiyao Meng and Amir H Gandomi and Kalyanmoy Deb and Danial Yazdani and Rohit Salgotra and Mohammad Nabi Omidvar and Max Bannach and Emmanuel Blazquez and Dario Izzo and Francois Clement and Luis Paquete and Kathrin Klamroth and Adriano Rodrigues {Figueiredo Torres} and Markus Wagner}, pages = {17--18}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {ant colony optimization, artificial intelligence, neural network}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664050}, size = {2 pages}, abstract = {Recently, there has been a noticeable increase in the adoption of deep learning techniques to tackle time window routing issues, which are prevalent in various real-world applications. Our paper introduces Alimentation Deep Multiple Optimal Ant Colony Optimization (AMO-ACO), a new neural-enhanced method for solving the Vehicle Routing Problem with Time Windows. Our model handles the problem as a multi-objective task, using transfer learning across constraints and exploring various paths for the ant colony from starting points.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(viktorin:2024:GECCOcomp, author = {Adam Viktorin and Michal Pluhacek and Tomas Kadavy and Jozef Kovac and Peter Janku and Roman Senkerik}, title = {{DISH} Solving the {GNBG-generated} Test Suite}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Hemant Kumar Singh and Daan Apeldoorn and Alexander Dockhorn and Torsten Panholzer and Diederick Vermetten and Konstantin Dietrich and Pascal Kerschke and Carola Doerr and Maciej Komosinski and Konrad Miazga and Agnieszka Mensfelt and Ali Ahrari and Jonathan Fieldsend and Mike Preuss and Xiaodong Li and Michael G. Epitropakis and Johannes Karder and Stefan Wagner and Bernhard Werth and Andreas Beham and Joao Soares and Fernando Lezama and Josex Almeida and Wenlei Bai and Thomas Baeck and Zita Vale and Aneta Neumann and Saba Sadeghi Ahouei and Jacob {de Nobel} and Giorgia Nadizar and Luigi Rovito and Dennis G. Wilson and Eric Medvet and Rong Qu and Nelishia Pillay and Weiyao Meng and Amir H Gandomi and Kalyanmoy Deb and Danial Yazdani and Rohit Salgotra and Mohammad Nabi Omidvar and Max Bannach and Emmanuel Blazquez and Dario Izzo and Francois Clement and Luis Paquete and Kathrin Klamroth and Adriano Rodrigues {Figueiredo Torres} and Markus Wagner}, pages = {19--20}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary computation, benchmarking}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664052}, size = {2 pages}, abstract = {This paper presents an extended abstract describing an entry into the benchmarking competition on a new GNBG-generated Test Suite. We are presenting the results of our previously published Distance based parameter adaptation for Success-History based Differential Evolution (DISH) algorithm based on state of the art adaptive differential evolution variants. 
The key feature of our algorithm is a prolonged exploration due to an updated weighting scheme for control parameter adaptation. The results show that our contestant is able to locate the global optima on 12 out of the 24 test functions.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(bannach:2024:GECCOcomp, author = {Max Bannach and Emmanuel Blazquez and Dario Izzo and Giacomo Acciarini and Alexander Hadjiivanov and Gernot Heissel and Rita Mastroianni and Sebastien Origer and Jai Grover and Dominik Dold and Zacharia Rudge}, title = {The Space Optimization Competition: Third Edition}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Hemant Kumar Singh and Daan Apeldoorn and Alexander Dockhorn and Torsten Panholzer and Diederick Vermetten and Konstantin Dietrich and Pascal Kerschke and Carola Doerr and Maciej Komosinski and Konrad Miazga and Agnieszka Mensfelt and Ali Ahrari and Jonathan Fieldsend and Mike Preuss and Xiaodong Li and Michael G. Epitropakis and Johannes Karder and Stefan Wagner and Bernhard Werth and Andreas Beham and Joao Soares and Fernando Lezama and Josex Almeida and Wenlei Bai and Thomas Baeck and Zita Vale and Aneta Neumann and Saba Sadeghi Ahouei and Jacob {de Nobel} and Giorgia Nadizar and Luigi Rovito and Dennis G. Wilson and Eric Medvet and Rong Qu and Nelishia Pillay and Weiyao Meng and Amir H Gandomi and Kalyanmoy Deb and Danial Yazdani and Rohit Salgotra and Mohammad Nabi Omidvar and Max Bannach and Emmanuel Blazquez and Dario Izzo and Francois Clement and Luis Paquete and Kathrin Klamroth and Adriano Rodrigues {Figueiredo Torres} and Markus Wagner}, pages = {21--22}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic computation, optimization, computational challenge}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664048}, size = {2 pages}, abstract = {The organizers of the third Space Optimization Competition (SpOC)1 give a brief overview of the competition. We lay out the timeline and discuss administrative decisions. As was common in previous editions, the competition consists of three problems. 
We present them swiftly and provide their scientific background and motivation.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(altarabichi:2024:GECCOcomp, author = {Mohammed Ghaith Altarabichi and Slawomir Nowaczyk and Sepideh Pashami and Peyman {Sheikholharam Mashhadi} and Julia Handl}, title = {A Review of Randomness Techniques in Deep Neural Networks}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {23--24}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {deep neural network, hyperparameter, particle swarm optimization, convolutional neural network, randomized neural networks}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664077}, size = {2 pages}, abstract = {This paper investigates the effects of various randomization techniques on Deep Neural Networks (DNNs) learning performance. We categorize the existing randomness techniques into four key types: injection of noise/randomness at the data, model structure, optimization or learning stage. We use this classification to identify gaps in the current coverage of potential mechanisms for the introduction of randomness, leading to proposing two new techniques: adding noise to the loss function and random masking of the gradient updates. We use a Particle Swarm Optimizer (PSO) for hyperparameter optimization and evaluate over 30,000 configurations across standard computer vision benchmarks. Our study reveals that data augmentation and weight initialization randomness significantly improve performance, and different optimizers prefer distinct randomization types. The complete implementation and dataset are available on GitHub1.This paper for the Hot-off-the-Press track at GECCO 2024 summarizes the original work published at [2].}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(briesch:2024:GECCOcomp, author = {Martin Briesch and Ryan Boldi and Dominik Sobania and Alexander Lalejini and Thomas Helmuth and Franz Rothlauf and Charles Ofria and Lee Spector}, title = {Improving Lexicase Selection with Informed {Down-Sampling}}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {25--26}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, lexicase selection, informed down-sampling}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664068}, size = {2 pages}, abstract = {This short paper presents the main findings of our work titled Informed Down-Sampled Lexicase Selection: Identifying Productive Training Cases for Efficient Problem Solving, which was recently published in the Evolutionary Computation Journal. In this work, we introduce informed down-sampled lexicase selection to dynamically build diverse subsets of training cases during evolution using population statistics. 
We evaluate our method on a set of program synthesis problems in two genetic programming systems and find that informed down-sampling improves performance in both systems compared to random down-sampling when using lexicase selection. Additionally, we investigate the constructed down-samples and find that informed down-sampling can identify important training cases and does so across different evolutionary runs and systems.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(cerf:2024:GECCOcomp, author = {Sacha Cerf and Benjamin Doerr and Benjamin Hebras and Yakob Kahane and Simon Wietheger}, title = {Hot off the Press: The First Proven Performance Guarantees for the {Non-Dominated} Sorting Genetic Algorithm {II} {(NSGA-II)} on a Combinatorial Optimization Problem}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {27--28}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {NSGA-II, runtime analysis, multi-objective optimization, theory}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664080}, size = {2 pages}, abstract = {Recently, the first mathematical runtime guarantees have been obtained for the NSGA-II, one of the most prominent multi-objective optimization algorithms, however only for synthetic benchmark problems. In this work, we give the first proven performance guarantees for a classic optimization problem, the NP-complete bi-objective minimum spanning tree problem. More specifically, we show that the NSGA-II with population size N >= 4((n - 1)w_max + 1) computes all extremal points of the Pareto front in an expected number of O(m^2 n w_max log(n w_max)) iterations, where n is the number of vertices, m the number of edges, and w_max is the maximum edge weight in the problem instance. This result confirms, via mathematical means, the good performance of the NSGA-II observed empirically. It also paves the way for analyses of the NSGA-II on complex combinatorial optimization problems. As a side result, we also obtain a new analysis of the performance of the GSEMO algorithm on the bi-objective minimum spanning tree problem, which improves the previous best result by a factor of |F|, the number of points in the convex hull of the Pareto front, a set that can be as large as n w_max. The main reason for this improvement is our observation that both algorithms find the different extremal points in parallel rather than sequentially, as assumed in the previous proofs. This paper for the Hot-off-the-Press track at GECCO 2024 summarizes the work Sacha Cerf, Benjamin Doerr, Benjamin Hebras, Jakob Kahane, and Simon Wietheger. 2023. The first proven performance guarantees for the Non-Dominated Sorting Genetic Algorithm II (NSGA-II) on a combinatorial optimization problem. In International Joint Conference on Artificial Intelligence, IJCAI 2023.
ijcai.org, 5522--5530 [1].}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(chou:2024:GECCOcomp, author = {Yao-Hsin Chou and Cheng-Yen Hua and Shu-Yu Kuo and Yu-Chi Jiang and Sy-Yen Kuo}, title = {{Evo-Panel:} Dynamic Visualization Tool for Optimization Process}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {29--30}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {computational intelligence, evolutionary computation, interactive systems, metaheuristics, optimization visualization tools, XAI}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664079}, size = {2 pages}, abstract = {This article for the Hot-off-the-Press track at GECCO 2024 summarizes recent work titled 'Evo-Panel: Dynamic Visualization Tool for Optimization Process', published in IEEE Transactions on Emerging Topics in Computational Intelligence [4]. We believe this work is of interest to the GECCO community as it represents the first attempt to establish a comprehensive dynamic visualization tool, Evo-Panel, capable of directly and flexibly illustrating the detailed procedures of metaheuristics and optimization methods in solving numerical benchmark functions. The objective of Evo-Panel is to help users identify the influence of the design formulas, operations, and parameters on the capabilities and performances of the algorithms. Using Evo-Panel, users can trace each movement, intuitively observe the process, and interpret the processes related to evolutionary algorithms (EAs), which are important to explainable AI. Moreover, Evo-Panel can perform a comparative analysis to illustrate the differences between algorithms. The case study results demonstrate that employing Evo-Panel enables professors to explain optimization algorithms visually engagingly and enhances the students' understanding of EAs. The tool can promote CI-related education from teaching and learning perspectives. 
Furthermore, researchers can use it to obtain information for analysis, facilitate debugging, verify design ideas, gain insights into the process, and improve the design of EAs.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(de-winter:2024:GECCOcomp, author = {Roy {de Winter} and Bas Milatz and Julian Blank and Niki {Van Stein} and Thomas Baeck and Kalyanmoy Deb}, title = {Hot off the Press: Parallel {Multi-Objective} Optimization for Expensive and Inexpensive Objectives and Constraints}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {31--32}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {surrogate assisted optimization, multi-objective optimization, constraint optimization, parallel optimization}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664058}, size = {2 pages}, abstract = {This paper presents the Inexpensive Objectives and Constraints Self-Adapting Multi-Objective Constraint Optimization algorithm using Radial Basis Function Approximations (IOC-SAMO-COBRA). This algorithm is introduced to address constraint multi-objective optimization problems that involve a mix of computationally expensive and inexpensive evaluation functions. The IOC-SAMO-COBRA algorithm iteratively learns the expensive functions with radial basis function surrogates, while the inexpensive functions are directly used in the search for promising solutions. Two benefits of using the inexpensive functions directly include: (1) The inexpensive functions do not introduce approximation errors like surrogates do. (2) Time can be saved as there is no need to fit and interpolate surrogate models for the inexpensive functions. 
Results on 22 test functions indicate that exploiting the inexpensive functions can be beneficial as this results in better Pareto front approximations compared to using surrogates for both expensive and inexpensive functions. This paper serves as an extended abstract for the Hot-off-the-Press track at GECCO 2024, building upon the paper Parallel Multi-Objective Optimization for Expensive and Inexpensive Objectives and Constraints by de Winter et al., published in Swarm and Evolutionary Computation, Volume 86 (2024) [4].}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(dinot:2024:GECCOcomp, author = {Matthieu Dinot and Benjamin Doerr and Ulysse Hennebelle and Sebastian Will}, title = {Hot off the Press: Runtime Analyses of {Multi-Objective} Evolutionary Algorithms in the Presence of Noise}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {33--34}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {runtime analysis, multi-objective optimization, robustness to noise, theory}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664070}, size = {2 pages}, abstract = {In this work, we conduct the first mathematical runtime analysis of a simple multi-objective evolutionary algorithm (MOEA) on a classic benchmark in the presence of noise in the objective function. We prove that when bit-wise prior noise with rate p <= alpha/n, alpha a suitable constant, is present, the simple evolutionary multi-objective optimizer (SEMO) without any adjustments to cope with noise finds the Pareto front of the OneMinMax benchmark in time O(n^2 log n), just as in the case without noise. Given that the problem here is to arrive at a population consisting of n + 1 individuals witnessing the Pareto front, this is a surprisingly strong robustness to noise (comparably simple evolutionary algorithms cannot optimize the single-objective OneMax problem in polynomial time when p = omega(log(n)/n)). Our proofs suggest that the strong robustness of the MOEA stems from its implicit diversity mechanism designed to enable it to compute a population covering the whole Pareto front. Interestingly, this result only holds when the objective value of a solution is determined only once and the algorithm from that point on works with this, possibly noisy, objective value. We prove that when all solutions are reevaluated in each iteration, then any noise rate p = omega(log(n)/n^2) leads to a super-polynomial runtime. This is very different from single-objective optimization, where it is generally preferred to reevaluate solutions whenever their fitness is important and where examples are known such that not reevaluating solutions can lead to catastrophic performance losses. This paper for the Hot-off-the-Press track at GECCO 2024 summarizes the work Matthieu Dinot, Benjamin Doerr, Ulysse Hennebelle, and Sebastian Will. 2023. Runtime analyses of multi-objective evolutionary algorithms in the presence of noise. In International Joint Conference on Artificial Intelligence, IJCAI 2023.
ijcai.org, 5549-555 [6].}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(doerr:2024:GECCOcomp, author = {Benjamin Doerr and Aymen Echarghaoui and Mohammed Jamal and Martin S. Krejca}, title = {Runtime Analysis of the $(\mu + 1)$ {GA:} Provable {Speed-Ups} from Strong Drift towards Diverse Populations}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {35--36}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {theory, runtime analysis, genetic algorithm, crossover, jump}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664065}, size = {2 pages}, abstract = {Most evolutionary algorithms used in practice heavily employ crossover. In contrast, the rigorous understanding of how crossover is beneficial is largely lagging behind. In this work, we make a considerable step forward by analyzing the population dynamics of the (mu + 1) genetic algorithm when optimizing the Jump benchmark. We observe (and prove via mathematical means) that once the population contains two different individuals on the local optimum, the diversity in the population increases in expectation. From this drift towards more diverse states, we show that a diversity suitable for crossover to be effective is reached quickly and, more importantly, then persists for a time that is at least exponential in the population size mu. This drastically improves over the previously best known guarantee, which is only quadratic in mu. Our new understanding of the population dynamics easily gives stronger performance guarantees. In particular, we derive that population sizes logarithmic in the problem size n already suffice to gain an Omega(n)-factor runtime improvement from crossover (previous works achieved comparable bounds only with mu = Theta(n) or via a non-standard mutation rate). This paper for the hot-off-the-press track at GECCO 2024 summarizes the work Benjamin Doerr, Aymen Echarghaoui, Mohammed Jamal, and Martin S. Krejca: Runtime analysis of the (mu + 1) GA: Provable speed-ups from strong drift towards diverse populations. Conference on Artificial Intelligence, AAAI 2024. AAAI Press, 20683--20691.
DOI: 10.1609/aaai.v38i18.30055 [4].}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(fong:2024:GECCOcomp, author = {Kei Sen Fong and Mehul Motani}, title = {Enhancing Prediction, Explainability, Inference and Robustness of Decision Trees via Symbolic {Regression-Discovered} Splits}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {37--38}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, symbolic regression, decision tree}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664067}, size = {2 pages}, abstract = {We introduce a hybrid machine learning algorithm that utilizes Genetic Programming-based Symbolic Regression (SR) to create decision trees (DT) with enhanced prediction, explainability, inference and robustness. Conventional DT algorithms for classification tasks are limited to axis-parallel splits. Thus, when the true boundaries do not align with feature axes, DT is likely to exhibit complex structures. In this work, we introduce SR-Enhanced DT (SREDT), which utilizes SR to increase the richness of the class of potential DT splits. We assess the performance of SREDT on both synthetic and real-world datasets. Despite its simplicity, our approach yields remarkably compact trees that surpass DT and its variant, oblique DT (ODT), in supervised classification tasks in terms of accuracy and F-score. SREDT possesses low depth, with a small number of leaves and terms, increasing explainability. SREDT also makes faster inference, even compared to DT. SREDT also demonstrates the highest robustness to noise. Furthermore, despite being a small white-box model, SREDT demonstrates competitive performance with large black-box tabular classification algorithms, including tree ensembles and deep models. This Hot-of-the-Press paper summarizes the work, K.S. Fong and M. Motani, "Symbolic Regression Enhanced Decision Trees for Classification Tasks", The Annual AAAI Conference on Artificial Intelligence (AAAI'24).}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(klikovits:2024:GECCOcomp, author = {Stefan Klikovits and Cedric {Ho Thanh} and Ahmet Cetinkaya and Paolo Arcaini}, title = {Trust Your Neighbours: Handling Noise in {Multi-Objective} Optimisation Using {kNN-Averaging} {(GECCO'24} Hot off the Press)}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {39--40}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {kNN-averaging, noisy system control, noisy fitness, search-based approaches, multi-objective optimization, cyber-physical systems}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664075}, size = {2 pages}, abstract = {The non-deterministic nature of modern systems such as cyber-physical systems (e.g. 
due to sensor noise) and multi-process/multi-agent systems (e.g. due to timing differences), poses a significant challenge in the field of multi-objective optimisation (MOO). Those systems may produce different objective values on every evaluation of the objective function, in which case the effectiveness of classical MOO algorithms can no longer be guaranteed. It has indeed been observed that they are prone to producing results that are either not optimal or not feasible. An obvious, yet naive, solution is to approximate the true fitness of a solution by sampling the objective function multiple times. However, this leads to significantly more evaluations of the objective function, which may not be acceptable, e.g. if the fitness function is expensive to compute. To tackle this issue, we propose kNN-averaging, an MOO algorithm that approximates the true fitness of solutions based on a k-nearest neighbours (kNN) regression scheme. We experimentally compare kNN-averaging to two resampling-based methods, a Gaussian process-based spectral sampling approach, and the default, uncorrected baseline, on 40 benchmark problems and one real-world case study. This is an extended abstract of the paper: S. Klikovits, C. Ho Thanh, A. Cetinkaya, and P. Arcaini. Trust your neighbours: Handling Noise in Multi-Objective Optimisation Using kNN-Averaging. Applied Soft Computing, Vol. 146, (2023). https://doi.org/10.1016/j.asoc.2023.110631}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(lavinas:2024:GECCOcomp, author = {Yuri Lavinas and Marcelo Ladeira and Gabriela Ochoa and Claus Aranha}, title = {Multiobjective Evolutionary Component Effect on Algorithm Behavior}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {41--42}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {algorithm analysis, multiobjective optimization}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664069}, size = {2 pages}, abstract = {There has been an increased interest in the automatic design of multiobjective algorithms from their components to simplify the development and application of new approaches. These machine designed algorithms (auto-MOEA) can outperform their human-designed counterparts, but it is still unknown which components mostly influence their performance. We propose a methodology to investigate the components' effects on the final auto-MOEA configuration. We study the components' impact using Search Trajectory Networks (STNs), diversity of the population, and anytime hyper-volume values. We found that analyzing the objective and decision spaces simultaneously provides complementary information about the algorithms' behaviour. This paper for the Hot-off-the-Press track at GECCO 2024 summarizes the work Yuri Lavinas, Marcelo Ladeira, Gabriela Ochoa, and Claus Aranha. 2023. Multiobjective Evolutionary Component Effect on Algorithm behavior. ACM Trans. Evol. Learn. Optim. Just Accepted (August 2023).
https://doi.org/10.1145/3612933 [3].}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(le-tolguenec:2024:GECCOcomp, author = {Paul-Antoine {Le Tolguenec} and Emmanuel Rachelson and Yann Besse and Dennis Wilson}, title = {Summary of "Curiosity creates Diversity in Policy Search"}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {43--44}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary strategies, intrinsic motivation, policy search}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664076}, size = {2 pages}, abstract = {When searching for policies, reward-sparse environments often lack sufficient information about which behaviors to improve upon or avoid. In such environments, the policy search process is bound to blindly search for reward-yielding transitions and no early reward can bias this search in one direction or another. A way to overcome this is to use intrinsic motivation in order to explore new transitions until a reward is found. In this work, we use a recently proposed definition of intrinsic motivation, Curiosity, in an evolutionary policy search method. In [4], we proposed Curiosity-ES1, an evolutionary strategy adapted to use Curiosity as a fitness metric. We compare Curiosity-ES with other evolutionary algorithms intended for exploration, as well as with Curiosity-based reinforcement learning, and find that Curiosity-ES can generate higher diversity without the need for an explicit diversity criterion and leads to more policies which find reward.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(li-calsi:2024:GECCOcomp, author = {Davide {Li Calsi} and Matias Duran and Xiao-Yi Zhang and Paolo Arcaini and Fuyuki Ishikawa}, title = {Distributed Repair of Deep Neural Networks (Hot off the Press at {GECCO} 2024)}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {45--46}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {DNN repair, automated repair, distributed search, risk levels}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664081}, size = {2 pages}, abstract = {Deep Neural Networks (DNNs) are increasingly used for critical tasks, such as classification in autonomous driving, whose trustworthiness is extremely important. To guarantee trustworthiness, DNN repair has been used to improve DNN performance, by using metaheuristic search to find alternative values of specific weights, that allow to improve the DNN accuracy. However, achieving perfect accuracy is not possible, and, therefore, one should prioritise the most critical misclassifications, such as those of pedestrians. To this aim, we propose DistrRep, a search-based DNN repair approach that considers priorities among the different misclassifications given by their risk levels. 
For each misclassification, DistrRep identifies the weights responsible for that misclassification, and runs a repair approach based on Particle Swarm Optimization that fixes the weights. Then, starting from all the repaired models, it runs another search-based repair that searches for the DNN model that integrates all the single repaired models, by considering the risk levels of the different misclassifications. Experimental results show that the search-based approach implemented by DistrRep is more effective than retraining approaches and other DNN repair approaches. This is an extended abstract of the paper: D. Li Calsi, M. Duran, X. Zhang, P. Arcaini, and F. Ishikawa, "Distributed Repair of Deep Neural Networks", in 16th IEEE Conference on Software Testing, Verification and Validation (ICST 2023).}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(mauder:2024:GECCOcomp, author = {Tomas Mauder and Jakub Kudela and Lubomir Klimes and Martin Zalesak and Pavel Charvat}, title = {Hot Off the Press: Soft computing methods in the solution of an inverse heat transfer problem with phase change}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {47--48}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {inverse heat transfer, soft computing, machine learning, meta-heuristics, surrogate model, fuzzy logic}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664073}, size = {2 pages}, abstract = {This Hot Off the Press paper summarizes our recent work "Soft computing methods in the solution of an inverse heat transfer problem with phase change: A comparative study" published in Engineering Applications of Artificial Intelligence [5]. In the paper, we study inverse heat transfer problems with phase change, where the boundary heat flux is estimated. Such problems are ill-posed and their solution is challenging. Although there were conventional methods developed for this problem in the past, they are not well-suited for cases including phase change, as these contain strong non-linearities that bring additional computational difficulties. For such problems, soft computing methods provide a promising approach. Four methods from distinct categories of techniques are applied to this problem and thoroughly compared - the conventional gradient-based method, a fuzzy logic-based method, a population-based meta-heuristic, and a surrogate-assisted method. A reformulation of the problem utilizing dimension reduction and decomposition schemes was developed, bringing extensive computational improvements. The metaheuristic and the surrogate-based methods showed superior performance. Their performance was also rather stable and insensitive to the location of the temperature sensor (the source of data for the inverse estimation).
A Zenodo repository with the complete implementation of all considered problems and methods is available.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(morinaga:2024:GECCOcomp, author = {Daiki Morinaga and Kazuto Fukuchi and Jun Sakuma and Youhei Akimoto}, title = {Linear Convergence Rate Analysis of the {(1+1)-ES} on Locally Strongly Convex and Lipschitz Smooth Functions}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {49--50}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {derivative-free optimization, black-box optimization, evolution strategy, linear convergence, convergence rate, convex optimization}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664074}, size = {2 pages}, abstract = {Evolution strategy (ES) achieves widespread success in applications as a continuous black-box optimization algorithm. However, theoretical guarantees of its convergence rate have been established only for convex quadratic functions and their monotonic transformations. In this study, we derive an upper bound and a lower bound of the rates of linear convergence of the (1+1)-ES on locally L-strongly convex and U-smooth functions, as exp [EQUATION] and [EQUATION], respectively. The order of the upper bound derived is competitive with that derived for a derivative-free optimization (DFO) algorithm in the previous study. Unlike in DFO studies, no prior knowledge of the mathematical properties of the objective function, such as the Lipschitz constant, is given to the optimization algorithm.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(opris:2024:GECCOcomp, author = {Andre Opris and Duc-Cuong Dang and Dirk Sudholt}, title = {Hot off the Press: Crossover Can Guarantee Exponential {Speed-Ups} in Evolutionary {Multi-Objective} Optimisation}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {51--52}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary computation, runtime analysis, evolutionary multi-objective optimisation, recombination}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664057}, size = {2 pages}, abstract = {Despite the popularity of evolutionary multi-objective (EMO) algorithms in practice, their theoretical foundations are still in early development. Fundamental questions such as the benefits of crossover are not fully understood. This work provides a theoretical analysis of the well-known EMO algorithms GSEMO and NSGA-II to showcase the possible advantages of crossover. We propose two classes of "royal road" functions, one tailored for one-point crossover and another one for uniform crossover, on which the EMO algorithms cover the Pareto front in expected polynomial time if crossover is being used.
But when disabling crossover, they require exponential time to cover the Pareto front. The latter even holds for a large class of black-box algorithms using any elitist selection and any unbiased mutation operator, or even with the use of some immune-inspired hypermutation operator. This is the first example of a proven exponential performance gap through enabling crossover for the NSGA-II. This Hot-off-the-Press paper summarises the work Duc-Cuong Dang, Andre Opris, and Dirk Sudholt. Crossover can guarantee exponential speed-ups in evolutionary multi-objective optimisation, Artificial Intelligence, 330 (2024), 104098.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(renzullo:2024:GECCOcomp, author = {Joseph Renzullo and Westley Weimer and Stephanie Forrest}, title = {Combining Online Learning with {Mutation-Based} Stochastic Search to Repair Buggy Programs}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {53--54}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {neutral mutations, automated program repair, software mutational robustness, multiplicative weights update}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664082}, size = {2 pages}, abstract = {This article summarizes recent work in the field of Automated Program Repair that was published in Transactions on Evolutionary Learning and Optimization as Evolving Software: Combining Online Learning with Mutation-Based Stochastic Search. Automated Program Repair is a subfield of software engineering that has the goal of repairing defects in software with minimal human involvement. A popular approach combines random mutation with some form of search, but these methods are highly conservative, because most mutations are deleterious and can damage the program. We describe a method inspired by neutral mutations in biological systems that splits the problem of finding useful mutations into two stages. First, before a bug is identified, we generate mutations and screen them for safety, discarding any that break required functionality of the program. Then, when a software bug is reported, we rapidly and dynamically test large subsets of the earlier-discovered pool of mutations to find those that repair the defect. We implement this method in an algorithm called MWRepair, which uses online learning to guide the aggressiveness of the search process. 
MWRepair extends the reach of existing mutation-based techniques to repair harder and more complex defects in programs.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(santucci:2024:GECCOcomp, author = {Valentino Santucci and Josu Ceberio}, title = {Model-based Gradient Search using the {Plackett-Luce} model}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {55--56}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {gradient search, permutation, probability distribution, combinatorial optimization, parameters adaptation}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664063}, size = {2 pages}, abstract = {Computational optimization and optimization algorithms form a cornerstone area of computer science that has been extensively explored due to its myriad applications in many practical and real-world scenarios. At a high level, we can categorize computational optimization algorithms into two main classes: white-box and black-box algorithms. While the former require a certain degree of knowledge about the problem domain, the latter are more versatile and rely solely on basic domain information, such as the structure or representation of a solution. Perhaps, the most prominent examples of white-box algorithms are those which iteratively refine a solution based on the gradient of the objective function at hand. While widely used, particularly in the field of machine learning, these techniques have limited applicability in other domains where gradient information is unavailable. This is the case of Combinatorial Optimization Problems (COPs), where the domain consists of discrete solutions and, for this reason, the concept of gradient is not applicable.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(stripinis:2024:GECCOcomp, author = {Linas Stripinis and Jakub Kudela and Remigijus Paulavicius}, title = {Hot Off the Press: Benchmarking {Derivative-Free} Global Optimization Algorithms under Limited Dimensions and Large Evaluation Budgets}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {57--58}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {numerical benchmarking, derivative-free global optimization, evolutionary computation algorithms, nature-inspired meta-heuristics, deterministic algorithms}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664072}, size = {2 pages}, abstract = {This Hot Off the Press paper provides a brief summary of our recent work "Benchmarking Derivative-Free Global Optimization Algorithms under Limited Dimensions and Large Evaluation Budgets" published in IEEE Transactions on Evolutionary Computation [5]. 
In the paper, we performed a comprehensive computational comparison between stochastic and deterministic global optimization algorithms with twenty-five representative state-of-the-art methods selected from both classes. The experiments were set up with up to twenty dimensions and relatively large evaluation budgets (10^5 x n). Benchmarking was carried out in a significantly expanded version of the DIRECTGOLib v2.0 library, which included ten distinct collections of primarily continuous test functions. The evaluation of the methods focused on various aspects, such as solution quality, time complexity, and function evaluation usage. The rankings were determined using statistical tests and performance profiles. Our findings suggest that while state-of-the-art deterministic methods could find reasonable solutions with comparatively fewer function evaluations, most stochastic algorithms require more extensive evaluation budgets to deliver comparable results. However, the performance of stochastic algorithms excelled in more complex and higher-dimensional problems. These research findings offer valuable insights for practitioners and researchers, enabling them to tackle diverse optimization problems effectively.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(takeno:2024:GECCOcomp, author = {Shion Takeno and Masahiro Nomura and Masayuki Karasuyama}, title = {Hot off the Press: Towards Practical Preferential Bayesian Optimization with Skew Gaussian Processes}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {59--60}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {bayesian optimization, gaussian processes, preferential feedback}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664060}, size = {2 pages}, abstract = {We study preferential Bayesian optimization (BO) where reliable feedback is limited to pairwise comparisons called duels. An important challenge in preferential BO, which uses the preferential Gaussian process (GP) model to represent flexible preference structure, is that the posterior distribution is a computationally intractable skew GP. The most widely used approach for preferential BO is Gaussian approximation, which ignores the skewness of the true posterior. Alternatively, Markov chain Monte Carlo (MCMC) based preferential BO has also been proposed. In this work, we first verify the accuracy of Gaussian approximation, from which we reveal the critical problem that the predictive probability of duels can be inaccurate. This observation motivates us to improve the MCMC-based estimation for skew GP, for which we show the practical efficiency of Gibbs sampling and derive a low-variance MC estimator. However, the computational time of MCMC can still be a bottleneck in practice.
Towards building a more practical preferential BO, we develop a new method that achieves both high computational efficiency and low sample complexity, and then demonstrate its effectiveness through extensive numerical experiments. This paper for the Hot-off-the-Press track at GECCO'24 summarizes the work: Towards Practical Preferential Bayesian Optimization with Skew Gaussian Processes, ICML'23 [19].}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(wang:2024:GECCOcomp, author = {Ziming Wang and Changwu Huang and Xin Yao}, title = {Feature Attribution Explanation Based on {Multi-Objective} Evolutionary Learning}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {61--62}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {explainable machine learning, feature attribution explanations, multi-objective learning, multi-objective evolutionary algorithms}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664056}, size = {2 pages}, abstract = {The feature attribution explanation (FAE) method, which reveals the contribution of each input feature to the model's output, is one of the most popular explainable artificial intelligence techniques. To assess the quality of explanations provided by FAE methods, various metrics spanning multiple dimensions have been introduced. However, current FAE approaches often prioritize the faithfulness of their explanations, neglecting other crucial aspects. To address this issue, we define the construction of an FAE explainable model as a multi-objective learning problem and propose a framework that simultaneously considers multiple quality metrics during FAE explanation generation. Our experimental results demonstrate that our approach outperforms existing FAE methods in terms of faithfulness, sensitivity, and complexity.
Moreover, our method has better diversity and the capacity to offer different explanations for different stakeholders. This paper for the Hot-off-the-Press track at GECCO 2024 summarizes the work Ziming Wang, Changwu Huang, Yun Li, and Xin Yao: Multi-objective Feature Attribution Explanation for Explainable Machine Learning published in ACM Transactions on Evolutionary Learning and Optimization [5].}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(wietheger:2024:GECCOcomp, author = {Simon Wietheger and Benjamin Doerr}, title = {A Mathematical Runtime Analysis of the Non-dominated Sorting Genetic Algorithm {III} {(NSGA-III)}}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {63--64}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {NSGA-III, runtime analysis, multi-objective optimization, theory}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664062}, size = {2 pages}, abstract = {The Non-dominated Sorting Genetic Algorithm II (NSGA-II) is the most prominent multi-objective evolutionary algorithm for real-world applications. While it performs evidently well on bi-objective optimization problems, empirical studies suggest that it is less effective when applied to problems with more than two objectives. A recent mathematical runtime analysis confirmed this observation by proving that the NSGA-II, for an exponential number of iterations, misses a constant factor of the Pareto front of the simple m-objective OneMinMax problem when m >= 3. In this work, we provide the first mathematical runtime analysis of the NSGA-III, a refinement of the NSGA-II aimed at better handling more than two objectives. We prove that the NSGA-III with sufficiently many reference points - a small constant factor more than the size of the Pareto front, as suggested for this algorithm - computes the complete Pareto front of the 3-objective OneMinMax benchmark in an expected number of O(n log n) iterations. This result holds for all population sizes (that are at least the size of the Pareto front). It shows a drastic advantage of the NSGA-III over the NSGA-II on this benchmark. This paper for the Hot-off-the-Press track at GECCO 2024 summarizes the work Simon Wietheger and Benjamin Doerr. A mathematical runtime analysis of the Non-dominated Sorting Genetic Algorithm III (NSGA-III). In International Joint Conference on Artificial Intelligence, IJCAI 2023. 5657--5665, 2023.
[15].}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zhang:2024:GECCOcomp, author = {Hengzhe Zhang and Qi Chen and Bing Xue and Wolfgang Banzhaf and Mengjie Zhang}, title = {A Semantic-based Hoist Mutation Operator for Evolutionary Feature Construction in Regression [Hot off the Press]}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {65--66}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, bloat control, automated machine learning, evolutionary feature construction}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664071}, size = {2 pages}, abstract = {This Hot-off-the-Press paper summarizes our recently published work, "A Semantic-based Hoist Mutation Operator for Evolutionary Feature Construction in Regression" [9] published in IEEE Transactions on Evolutionary Computation. Our study introduces a semantic-based hoist mutation operator to control tree bloat and reduce tree sizes in genetic programming (GP) based evolutionary feature construction algorithms. The proposed operator identifies the most informative subtree with the largest cosine similarity to the target semantics and then hoists the subtree to the root as a new GP tree. This process reduces the tree sizes without compromising learning capability. The proposed operator is supported by the probably approximately correct (PAC) learning theory, ensuring that it does not degrade the generalization upper bound of GP models. By employing a hashing-based redundancy checking strategy, the proposed method outperforms seven bloat control methods on 98 datasets in reducing model size while maintaining test performance at the same level. These findings demonstrate the capability of using semantic hoist mutation for bloat control in GP-based evolutionary feature construction.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zheng:2024:GECCOcomp, author = {Weijie Zheng and Benjamin Doerr}, title = {Hot off the Press: Runtime Analysis for the {NSGA-II:} Proving, Quantifying, and Explaining the Inefficiency For Many Objectives}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {67--68}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {NSGA-II, runtime analysis, many-objective optimization, theory}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664061}, size = {2 pages}, abstract = {The NSGA-II is one of the most prominent algorithms to solve multi-objective optimization problems. Despite numerous successful applications, several studies have shown that the NSGA-II is less effective for larger numbers of objectives. In this work, we use mathematical runtime analyses to rigorously demonstrate and quantify this phenomenon. 
We show that even on the simple m-objective generalization of the discrete OneMinMax benchmark, where every solution is Pareto optimal, the NSGA-II, even with large population sizes, cannot compute the full Pareto front (objective vectors of all Pareto optima) in sub-exponential time when the number of objectives is at least three. The reason for this unexpected behavior lies in the fact that in the computation of the crowding distance, the different objectives are regarded independently. This is not a problem for two objectives, where any sorting of a pair-wise incomparable set of solutions according to one objective is also such a sorting according to the other objective (in the inverse order). This paper for the Hot-off-the-Press track at GECCO 2024 summarizes the work Weijie Zheng, Benjamin Doerr: Runtime Analysis for the NSGA-II: Proving, Quantifying, and Explaining the Inefficiency For Many Objectives. IEEE Transactions on Evolutionary Computation, in press. https://doi.org/10.1109/TEVC.2023.3320278 [23].}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zheng:2024:GECCOcomp2, author = {Weijie Zheng and Benjamin Doerr}, title = {Hot off the Press: Runtime Analysis of the {SMS-EMOA} for {Many-Objective} Optimization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {69--70}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {SMS-EMOA, NSGA-II, runtime analysis, many-objective optimization, theory}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664064}, size = {2 pages}, abstract = {The NSGA-II was recently proven to have difficulties in many-objective optimization. In contrast, the literature experimentally shows a good performance of the SMS-EMOA, which can be seen as a steady-state NSGA-II that uses the hypervolume contribution instead of the crowding distance as the second selection criterion. This paper conducts the first rigorous runtime analysis of the SMS-EMOA for many-objective optimization. To this aim, we first propose a many-objective counterpart, the m-objective mOJZJ problem, of the bi-objective OneJumpZeroJump benchmark, which is the first many-objective multimodal benchmark used in a mathematical runtime analysis. We prove that SMS-EMOA computes the full Pareto front of this benchmark in an expected number of O(M^2 n^k) iterations, where n denotes the problem size (length of the bit-string representation), k the gap size (a difficulty parameter of the problem), and M = (2n/m - 2k + 3)^(m/2) the size of the Pareto front. This result together with the existing negative result on the original NSGA-II shows that in principle, the general approach of the NSGA-II is suitable for many-objective optimization, but the crowding distance as tie-breaker has deficiencies. We obtain three additional insights on the SMS-EMOA. Different from a recent result for the bi-objective OneJumpZeroJump benchmark, the stochastic population update often does not help for mOJZJ. It results in a 1/Theta(min{M k^(1/2) / 2^(k/2), 1}) speed-up, which is Theta(1) for large m such as m > k. On the positive side, we prove that heavy-tailed mutation still results in a speed-up of order k^(k+0.5-beta).
Finally, we conduct the first runtime analyses of the SMS-EMOA on the bi-objective OneMinMax and LOTZ benchmarks and show that it has a performance comparable to the GSEMO and the NSGA-II. This paper for the Hot-off-the-Press track at GECCO 2024 summarizes the work Weijie Zheng, Benjamin Doerr: Runtime Analysis of the SMS-EMOA for Many-Objective Optimization. AAAI 2024, 20874--20882 [20].}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zheng:2024:GECCOcomp3, author = {Weijie Zheng and Mingfeng Li and Renzhong Deng and Benjamin Doerr}, title = {How to Use the Metropolis Algorithm for {Multi-Objective} Optimization?}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Marcus Gallagher}, pages = {71--72}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {metropolis algorithm, runtime analysis, multi-objective optimization, multimodal, theory}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664078}, size = {2 pages}, abstract = {As demonstrated by empirical and theoretical work, the Metropolis algorithm can cope with local optima by accepting inferior solutions with suitably small probability. This paper extends this research direction into multi-objective optimization. The original Metropolis algorithm has two components, one-bit mutation and the acceptance strategy, which allows accepting inferior solutions. When the acceptance strategy is adjusted to multi-objective optimization so that an accepted inferior solution replaces its parent, the Metropolis algorithm is not very efficient on our multi-objective version of the multimodal DLB benchmark called DLTB. With one-bit mutation, this multi-objective Metropolis algorithm cannot optimize the DLTB problem; with standard bit-wise mutation it needs at least Omega(n^5) time to cover the full Pareto front. In contrast, we show that many other multi-objective optimizers, namely the GSEMO, SMS-EMOA, and NSGA-II, only need time O(n^4). When keeping the parent when an inferior point is accepted, the multi-objective Metropolis algorithm with either one-bit or standard bit-wise mutation solves the DLTB problem efficiently, with one-bit mutation experimentally leading to better results than several other algorithms. Overall, our work suggests that the general mechanism of the Metropolis algorithm can be interesting in multi-objective optimization, but that the implementation details can have a huge impact on the performance. This paper for the Hot-off-the-Press track at GECCO 2024 summarizes the work Weijie Zheng, Mingfeng Li, Renzhong Deng, and Benjamin Doerr: How to Use the Metropolis Algorithm for Multi-Objective Optimization? In Conference on Artificial Intelligence, AAAI 2024, AAAI Press, 20883--20891.
https://doi.org/10.1609/aaai.v38i18.30078 [22].}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(abimbola:2024:GECCOcomp, author = {Jeremiah Abimbola and Daniel Kostrzewa and Pawel Kasprowski}, title = {Genetic {Algorithm-Based} Optimization of Weighted Voting in Ensemble Models for Time Signature Detection}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {73--74}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {time signature, genetic algorithms, ensemble learning, weighted voting}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664083}, size = {2 pages}, abstract = {Ensemble models, comprising different base classifiers, offer enhanced predictive performance by aggregating multiple predictions. In the context of time signature detection, where accurately identifying musical time signatures is crucial, ensemble methods play a pivotal role. This study investigates the optimization of ensemble model performance using a genetic algorithm (GA) to determine optimal weights for weighted voting on eight models: four classical models, namely Support Vector Machine (SVM), k-Nearest Neighbors (KNN), Naive Bayes (NB), and Random Forest (RF), and four deep learning models, namely Convolutional Neural Network (CNN), Convolutional Recurrent Neural Network (CRNN), Residual Network with 18 layers (RESNET18), and RESNET18 with Long Short-Term Memory (LSTM), using Mel-Frequency Cepstral Coefficients (MFCC) as audio features. Notably, the base models RESNET18 and RESNET18-LSTM exhibited promising results, prompting the use of GA to fine-tune their weights. Following the GA optimization, the ensemble accuracy improved by almost 2\%. This research demonstrates the importance of acknowledging that our initial assumptions regarding weight assignment may not always be optimal. It also contributes to the advancement of ensemble learning techniques tailored to music signal processing applications, especially for time signature detection systems.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(ayodele:2024:GECCOcomp, author = {Mayowa Ayodele}, title = {Utilising Quantum Hybrid Solver for Bi-objective Quadratic Assignment Problems}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {75--76}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {multi-objective, CQM, bi-objective CAP, quantum hybrid solver}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664097}, size = {2 pages}, abstract = {The intersection between quantum computing and optimisation has been an area of interest in recent years. There have been numerous studies exploring the application of quantum and quantum-hybrid solvers to various optimisation problems.
This work explores scalarisation methods within the context of solving the bi-objective quadratic assignment problem using a quantum-hybrid solver. We show results that are consistent with previous research on a different Ising machine.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(babaagba:2024:GECCOcomp, author = {Kehinde Babaagba and Ritwik Murali and Sarah Thomson}, title = {Exploring the use of fitness landscape analysis for understanding malware evolution}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {77--78}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664094}, size = {2 pages}, abstract = {We conduct a preliminary study exploring the potential of using fitness landscape analysis for understanding the evolution of malware. This type of optimisation is fairly new and has not previously been studied through the lens of landscape analysis. We consider Android-based malware evolution and an existing evolutionary algorithm from the literature is used. We construct and visualise search trajectory networks (STNs), which are a new tool aimed at investigation of algorithm behaviour. The STNs indicate that the considered malware spaces may be difficult to navigate under current search operators and that new ones may warrant consideration.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(benecki:2024:GECCOcomp, author = {Pawel Benecki and Daniel Kostrzewa and Piotr Grzesik and Bohdan Shubyn and Dariusz Mrozek}, title = {Enhancing Anomaly Detection in Automated Guided Vehicles through Feature Weight Optimization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {79--80}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithm, power consumption prediction, automated guided vehicles, anomaly detection, bicriterial optimization}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664100}, size = {2 pages}, abstract = {In this study, we propose an approach for optimizing feature weights to enhance the detection of mechanical wear in Automated Guided Vehicles (AGVs) based on power consumption data. Leveraging genetic algorithms, we aim to maximize forecasting quality on normal operational data while amplifying errors on anomalous fragments. Our method utilizes a custom telemetry dataset and Long Short-Term Memory (LSTM)-based forecasting models. 
Experimental results demonstrate the effectiveness of the proposed optimization method in improving anomaly detection performance, showcasing significant enhancements in precision, accuracy, and overall predictive maintenance capabilities for AGVs.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(briffoteaux:2024:GECCOcomp, author = {Guillaume Briffoteaux and Andrew Ciezak and Pablo Moscato}, title = {Towards Target {Derivatives-Enhanced} Continued Fraction Regression}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {81--82}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {continued fraction, symbolic regression, memetic algorithm}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664644}, size = {2 pages}, abstract = {We apply a Memetic Algorithm (MA) to regression problems, incorporating a novel guiding function based on higher-order target derivatives. Evaluating this approach in low-data settings with continued fraction regression (CFR) reveals significant improvements in interpolation and extrapolation performance, and a reduction in spurious poles.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(cai:2024:GECCOcomp, author = {Jinyu Cai and Jinglue Xu and Jialong Li and Takuto Yamauchi and Hitoshi Iba and Kenji Tei}, title = {Exploring the Improvement of Evolutionary Computation via Large Language Models}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {83--84}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {large language model, evolutionary computation}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664086}, size = {2 pages}, abstract = {Evolutionary computation (EC), as a powerful optimization algorithm, has been applied across various domains. However, as the complexity of problems increases, the limitations of EC have become more apparent. The advent of large language models (LLMs) has not only transformed natural language processing but also extended their capabilities to diverse fields. By harnessing LLMs' vast knowledge and adaptive capabilities, we provide a forward-looking overview of potential improvements LLMs can bring to EC, focusing on the algorithms themselves, population design, and additional enhancements. 
This presents a promising direction for future research at the intersection of LLMs and EC.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(chen:2024:GECCOcomp, author = {Min Chen and Yanyan Zhang}, title = {A decision space variable topology feature visualization and clustering method for Dynamic Multi-objective Optimization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {85--86}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {dynamic multi-objective optimization, visualization, cluster, dimensionality reduction}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664085}, size = {2 pages}, abstract = {We propose a dynamic multi-objective optimization algorithm for visualizing and clustering topological features of decision space variables. It maps all the obtained historical optimal solutions to the two-dimensional space through t-SNE, clusters the points, and selects a corresponding proportion of individuals from different clusters to form a new population. Throughout the search process, the topological characteristics of the reduced historical optimal solutions can be visually displayed. We conducted a preliminary experimental analysis on the standard test function to verify the effectiveness of the proposed method.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(cotta:2024:GECCOcomp, author = {Carlos Cotta and Jesus Martinez-Cruz}, title = {Energy Consumption Analysis of Batch Runs of Evolutionary Algorithms}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {87--88}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary algorithms, energy consumption, green AI, sustainable computing}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664093}, size = {2 pages}, abstract = {We analyze the energy consumption of running evolutionary algorithms in batch as a function of the rest time between runs. It is shown that energy consumption can be reduced by 5\%-8\% by inserting short pauses between runs to reduce hysteretic phenomena.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(cotta:2024:GECCOcomp2, author = {Carlos Cotta}, title = {Navigating the Aisles: Evolutionary Algorithms for Supermarket Evacuation Planning}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. 
Browne}, pages = {89--90}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary algorithms}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664096}, size = {2 pages}, abstract = {We address the placement of emergency exits to facilitate evacuation from supermarket-like environments. Using a simulation-based approach, evolutionary algorithms are shown to provide good results compared to a greedy heuristic and a numerical method.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(diarchangel:2024:GECCOcomp, author = {Korben Diarchangel and Liam Francisco and Jude Koenig and Sushil Louis}, title = {Evolving Potential Fields for Rules of the Road Compliant Ship Driving}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {91--92}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, simulation training, potential fields control}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664089}, size = {2 pages}, abstract = {We use potential fields tuned by genetic algorithms to drive simulated vessels following the nautical rules of the road in a training simulation. In our work, we use novel potential fields based on the relative bearing and target angle of the vessels at risk of collision. These non-linear potential fields are difficult to tune well by hand and we therefore use genetic algorithms to optimize search through the space of potential field parameter values. The genetic algorithm seeks parameter values that optimize vessel behavior by avoiding collisions while following the nautical rules of the road governing the behavior of vessels at sea. We conducted experiments on five major types of scenarios identified by expert mariners who provided optimal paths for vessels to follow. Results indicate that genetic algorithms find good potential field parameter values that result in better fitness and behavior than hand-tuned parameters and that the path-based fitness function may be key to better ship movement.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(eremeev:2024:GECCOcomp, author = {Anton Eremeev and Valentin Topchii}, title = {Generalization of the {Heavy-Tailed} Mutation in the (1+(lambda,lambda)) Genetic Algorithm}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N.
Browne}, pages = {93--94}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithm, heavy-tailed mutation, optimization time}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664095}, size = {2 pages}, abstract = {The heavy-tailed mutation operator, proposed by Doerr, Le, Makhmara, and Nguyen (2017) for evolutionary algorithms, is based on the power-law assumption of mutation rate distribution. Here we generalize the power-law assumption on the distribution function of mutation rate. We show that upper bounds on the expected optimization time of the (1 + (lambda, lambda)) genetic algorithm obtained by Antipov, Buzdalov and Doerr (2022) for the OneMax fitness function do not only hold for power-law distribution of mutation rate, but also for a wider class of distributions, defined in terms of power-law constraints on the cumulative distribution function of mutation rate. In particular, it is shown that, on this function class, the sufficient conditions of Antipov, Buzdalov and Doerr (2022) on the heavy-tailed mutation, ensuring the O(n) optimization time in expectation, may be generalized as well. This optimization time is known to be asymptotically faster than what can be achieved by the (1 + (lambda, lambda)) genetic algorithm with any static mutation rate.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(hong:2024:GECCOcomp, author = {Seung-Yeol Hong and Yong-Hyuk Kim}, title = {A Memetic Algorithm for Deployment of Search and Rescue Units}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {95--96}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {decision making, memetic algorithm, local search, deployment problem, search and rescue planning}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664088}, size = {2 pages}, abstract = {Optimizing the deployment of the search area is a crucial issue that determines the effectiveness of a search and rescue plan. However, previous studies have struggled to accurately reflect real-world conditions and achieve optimal search area deployment. In this study, we propose a local search algorithm and a memetic algorithm specialized in search area deployment. Additionally, for the practicality of the study, we introduce a constraint that changes the size of the search area based on the distance traveled. By experiments, we could show that the proposed memetic algorithm is more practical and effective than previous studies in maritime search and rescue planning.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(horn:2024:GECCOcomp, author = {Ruben Horn and Sarah L. 
Thomson and Daan {van den Berg} and Pieter Adriaans}, title = {The Easiest Hard Problem: Now Even Easier}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {97--98}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {the easiest hard problem, number partitioning problem, combinatorial optimization}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664099}, size = {2 pages}, abstract = {We present an exponential decay function that characterizes the number of solutions to instances of the Number Partitioning Problem (NPP) with uniform distribution of bits across the integers. This function is fitted on the number of optimal solutions of random instances with lengths between 10 and 20 integers and may be used as a heuristic either directly by new algorithms for the NPP or as a benchmark to evaluate how well different Evolutionary Algorithms (EAs) cover the search space. Despite the long history of the NPP, it seems such a characterization does not yet exist.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(jia:2024:GECCOcomp, author = {Weiyan Jia and Lixin Tang and Yang Yang and Yun Dong}, title = {Bi-objective approach for lot-sizing problem in cold rolling production planning}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {99--100}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary algorithm, lot-sizing, production planning, steel production}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664084}, size = {2 pages}, abstract = {Planning in cold rolling is an increasingly vital aspect of the steel industry. In this study, we investigate a cold rolling planning problem derived from a steel company, which is characterized by multi-item considerations with sequence-dependent setups in a single period. To tackle this problem, we introduce a bi-objective modified lot-sizing model, aiming at minimizing both the capacity waste and setup cost. A dedicated multi-objective evolutionary algorithm is proposed to solve this problem. The experimental results demonstrate the effectiveness of the proposed algorithm in achieving near-optimal solutions within a reasonable time, which can facilitate reductions in both production costs and time.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(moreno:2024:GECCOcomp, author = {Matthew Andres Moreno and Connor Yang and Emily Dolson and Luis Zaman}, title = {Trackable Island-model Genetic Algorithms at Wafer Scale}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N.
Browne}, pages = {101--102}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {island-model genetic algorithm, phylogenetics, wafer-scale computing, evolutionary computation, high-performance computing, cerebras wafer-scale engine, agent-based modeling, phylogenetic tracking}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664090}, size = {2 pages}, abstract = {Emerging ML/AI hardware accelerators, like the 850,000 processor Cerebras Wafer-Scale Engine (WSE), hold great promise to scale up the capabilities of evolutionary computation. However, challenges remain in maintaining visibility into underlying evolutionary processes while efficiently utilizing these platforms' large processor counts. Here, we focus on the problem of extracting phylogenetic information from digital evolution on the WSE platform. We present a tracking-enabled asynchronous island-based genetic algorithm (GA) framework for WSE hardware. Emulated and on-hardware GA benchmarks with a simple tracking-enabled agent model clock upwards of 1 million generations a minute for population sizes reaching 16 million. This pace enables quadrillions of evaluations a day. We validate phylogenetic reconstructions from these trials and demonstrate their suitability for inference of underlying evolutionary conditions. In particular, we demonstrate extraction of clear phylometric signals that differentiate wafer-scale runs with adaptive dynamics enabled versus disabled. Together, these benchmark and validation trials reflect strong potential for highly scalable evolutionary computation that is both efficient and observable. Kernel code implementing the island-model GA supports drop-in customization to support any fixed-length genome content and fitness criteria, allowing it to be leveraged to advance research interests across the community.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(pricopie:2024:GECCOcomp, author = {Stefan Pricopie and Richard Allmendinger and Manuel Lopez-Ibanez and Clyde Fare and Matt Benatan and Joshua Knowles}, title = {Bayesian Optimization with Setup Switching Cost}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {103--104}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {bayesian optimization, switching cost, expensive optimization}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664101}, size = {2 pages}, abstract = {Bayesian Optimization (BO) in its classical form is cost-unaware. However, many real-world problems are resource-constrained and hence incur a cost whenever such resources are needed, such as when a new setup is used. We are then looking at adapted cost-aware solution methods that are improving the performance of BO over cost-constrained problems. 
We find that parameter-free algorithms can yield comparable results to fine-tuned algorithms used in constrained optimization.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(ruszczak:2024:GECCOcomp, author = {Bogdan Ruszczak}, title = {Coupling Temporal Convolutional Networks with Anomaly Detection and {CMA-ES} for the Electricity Price Forecasting}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {105--106}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {hyper-parameter optimization, deep learning, temporal convolutional network, CMA-ES, electricity price forecasting}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664098}, size = {2 pages}, abstract = {The process of forecasting using temporal convolutional neural networks on multivariate market data is an established research topic due to its practical applications. Although machine learning models exist for this task, their performance is still limited when they are deployed for highly volatile markets. In this study, we tackle this challenge by exploiting an additional module within a machine learning model that provides auxiliary information captured from the input data, reflecting anomalous events observed in such markets. To further enhance the capabilities of the model, its hyperparameters are optimized using the covariance matrix adaptation evolution strategy. The experiments performed on the real-world electricity price quotes for the day-ahead market showed the superiority of our approach, which achieved a prediction error of 8.19 sMAPE and 0.319 rMAE, outperforming the vanilla model (sMAPE of 17.80 and rMAE of 0.693) and the baseline TCN (16.34 and 0.603, respectively).}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(sun:2024:GECCOcomp, author = {Ning Sun and Shuwei Zhu}, title = {Modeling and Optimization of Energy Consumption in Data-driven Factory Air Conditioning System}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {107--108}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithm, machine learning, energy consumption prediction, energy-saving optimization}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664103}, size = {2 pages}, abstract = {The Make-up Air Unit (MAU) is an air conditioning device serving semiconductor cleanrooms, which provides constant temperature and humidity for fresh air. The common air handling sections of the MAU can be divided into: fresh air filtration, preheating, precooling, water-washing humidification, recooling, reheating, air supply, and high-efficiency filtration.
The commonly used PID control methods for the MAU have limitations, so this work proposes a proactive energy-saving optimization control method based on machine learning and a genetic algorithm (GA). Firstly, machine learning methods are trained on historical data of the MAU, resulting in a data-driven prediction model of energy consumption for the system. Subsequently, the GA is used to optimize energy use in the cold and hot water systems. It facilitates the dynamic adjustment of the regulating valve opening for the cold and hot water coil in the fresh air unit, responding to real-time variations in outdoor air conditions. Meanwhile, it ensures that the supply air temperature and humidification adhere to specified requirements, thereby reducing the energy consumption associated with cold and hot water usage in the MAU.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(sun:2024:GECCOcomp2, author = {Zhuoran Sun and Ying Ying Liu and Parimala Thulasiraman and Ruppa Thulasiram}, title = {Parallel {Co-Evolutionary} Algorithm and Implementation on {CPU-GPU} Multicore}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {109--110}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {parallel computing, co-evolutionary, multi-objective optimization}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664091}, size = {2 pages}, abstract = {In this paper, we study the parallelization of a hybrid evolutionary heuristic for solving multi-objective optimization problems on heterogeneous CPU-GPU multi-core architectures. Two independent heuristics, Non-dominated Sorting Genetic Algorithm II (NSGA-II) and Multi-objective Evolutionary Algorithm based on Decomposition (MOEA/D), are evolved concurrently, periodically exchanging information to explore and exploit the objective space. NSGA-II is parallelized on a GPU accelerator and MOEA/D on a multithreaded multicore CPU architecture following an island model. The parallel algorithm's performance is evaluated on a bi-objective travelling salesperson benchmark data set.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(sun:2024:GECCOcomp3, author = {Nan Sun and Shuwei Zhu}, title = {Surrogate Modeling in Efficient Evolutionary Multiobjective Clustering Algorithm}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {111--112}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary multi-objective optimization, multiobjective clustering, surrogate models}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664102}, size = {2 pages}, abstract = {Evolutionary multiobjective clustering (MOC) algorithms have shown powerful capability in generating a set of clusterings for decision making when the number of clusters k is not given.
However, the quantities of data in many fields are increasing dramatically, which consequently imposes a heavy computational burden on existing MOC methods. In this study, we explore surrogate-assisted techniques to reduce the cost of function evaluations, so as to develop an efficient framework of evolutionary MOC. Designed around a graph-based solution encoding based on the trained seed nodes of the dataset, the cluster representation is not only flexible and unbiased, but also keeps a fixed-length reduced decision space that is independent of different k. During optimization, various widely-used surrogate models and a novel surrogate model based on a graph convolutional neural network (GCN) are adopted to predict the clustering objective functions. Hence, this work aims to fill the important research gap in surrogate modeling for efficient MOC algorithms. We compare the performance of different surrogate models on several datasets, showing encouraging results, especially for the newly-developed GCN-based model.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(yelleti:2024:GECCOcomp, author = {Vivek Yelleti and Vadlamani Ravi and P. {Radha Krishna}}, title = {Online Feature Subset Selection in Streaming Features by Parallel Evolutionary Algorithms}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {113--114}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {online feature subset selection, particle swarm optimization, hybrid evolutionary algorithm, data streams}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664092}, size = {2 pages}, abstract = {Performing online feature subset selection (OFS) when data samples arrive at a high velocity in a time-dependent manner is a critical problem to solve. The situation becomes more difficult when the features also arrive in a stream. Several efforts have been made by researchers to perform OFS over feature streams. However, they are not scalable and cannot analyze feature streams coming at a high velocity. Further, optimal feature subsets must be identified by scalable approaches. It is noteworthy that evolutionary algorithms (EAs), which are inherently parallel and scalable, are rarely employed for OFS in the streaming feature case. This motivated us to address the challenges and propose a generic EA-based wrapper for OFS to mine feature streams under the Apache Spark environment.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(yu:2024:GECCOcomp, author = {Seo-Ah Yu and Yong-Hyuk Kim}, title = {Enhancing Graph Cut Search through a {Surrogate-Assisted} Genetic Algorithm with {Edge-Based} Encoding}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N.
Browne}, pages = {115--116}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {surrogate-assisted genetic algorithm, genetic algorithm, machine learning, binary encoding, optimization}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664087}, size = {2 pages}, abstract = {This paper addresses graph cut problems, which are critical issues applied in diverse fields such as image segmentation, data clustering, and circuit design. Unlike the existing encoding method that only represents spatial information of vertices, we train a machine learning model through a new encoding that contains information about edges between vertices. After that, we design a surrogate-assisted genetic algorithm that substitutes the fitness computation of the genetic algorithm with a trained machine-learning model. Encoding containing edge information dramatically improved the approximation performance of machine learning models in all test cases, and this improvement also had a positive effect on the ability to find the optimal solution of a surrogate-assisted genetic algorithm. So, in all test cases, our surrogate-assisted genetic algorithm using a new encoding outperformed that using the existing encoding. Surprisingly, it even found a better solution than a normal genetic algorithm in 53\% of test cases, which shows the possibility that a surrogate-assisted genetic algorithm can be an alternative to complement the shortcomings of a normal genetic algorithm.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zhang:2024:GECCOcomp2, author = {Weiwei Zhang and Meiling Xu and Lixin Tang}, title = {Joint Entropy Enhanced Multi-objective Bayesian Optimization for Graph Pruning}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Justyna Petke and Will N. Browne}, pages = {117--118}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {multi-objective bayesian optimization, graph pruning, evolutionary algorithm}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3665088}, size = {2 pages}, abstract = {Graph learning processes entire attributed graphs, posing computational and scalability challenges. This study proposes a joint entropy enhanced multi-objective Bayesian optimization approach (JE-MoBopt) for efficient graph pruning. By employing the non-dominated sorting genetic algorithm II (NSGA-II), JE-MoBopt can effectively identify the Pareto front calculated by the surrogate models. Furthermore, we introduce a joint entropy search (JES) acquisition function to select an appropriate point from the Pareto solution set, thereby refining the surrogate models. 
We evaluate the JE-MoBopt approach on brain networks to satisfy both efficiency and accuracy requirements while preserving the important edges.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(gallagher:2024:GECCOcomp, author = {Marcus Gallagher and Mario Munoz}, title = {Towards an Improved Understanding of Features for More Interpretable Landscape Analysis}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Carola Doerr and Arnaud Liefooghe}, pages = {135--138}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {landscape analysis, benchmarking, algorithm-problem mapping, Benchmarks, Software, Reproducibility: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654301}, size = {4 pages}, abstract = {Landscape analysis has received increasing attention in the literature, with a major focus on producing feature data to be used in a machine learning pipeline for automatic algorithm selection and configuration. In contrast, the original purpose of landscape analysis (i.e. to develop theory and techniques to understand the structure of fitness landscapes) has been somewhat overshadowed. This paper aims to clarify the purpose of landscape features, emphasizing the need for clear definitions of distance or similarity between optimization problem instances. It argues that current methodologies' shortcomings can be better understood from this perspective and advocates for more interpretable features and distances to gain explainable insights into problem instances, algorithm-problem mappings, and improved benchmarking of black-box algorithms.}, notes = {GECCO-2024 BBSR A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(hajari:2024:GECCOcomp, author = {Sara Hajari and Marcus Gallagher}, title = {Searching for Benchmark Problem Instances from {Data-Driven} Optimisation}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Carola Doerr and Arnaud Liefooghe}, pages = {139--142}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {algorithm benchmarking, continuous blackbox optimisation, clustering, fitness landscape analysis, Benchmarking, Benchmarks, Software, Reproducibility: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654322}, size = {4 pages}, abstract = {Given the large number of existing metaheuristic optimisation algorithms, there has been an increasing focus on improving benchmarking practices, to gain an improved understanding of empirical performance, as well as matching algorithms and problems. One important aspect of this is more diversity in test problem sets, so that algorithms are evaluated on a wider range of problem instances, including problems that are more representative of the real world. In this paper, we explore data-driven benchmarking problems. By perturbing the data, we perform meta-search over this space of problem instances.
Specifically, we consider sum of squares clustering problems and find instances that test CMA-ES. This approach can be effective, even when the dataset is very simple. In addition, we show that simple clustering instances have interesting properties, including neutrality (missing from many commonly-used benchmarks). We argue that data-driven optimisation can provide useful insights into problem structure and algorithm behaviour.}, notes = {GECCO-2024 BBSR A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(nikolikj:2024:GECCOcomp, author = {Ana Nikolikj and Tome Eftimov}, title = {Comparing Solvability Patterns of Algorithms across Diverse Problem Landscapes}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Carola Doerr and Arnaud Liefooghe}, pages = {143--146}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {algorithm behavior, single-objective optimization, latent representations, supervised machine learning, explainability, Benchmarking, Benchmarks, Software, Reproducibility: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654305}, size = {4 pages}, abstract = {In the field of continuous single-objective black-box optimization, understanding the varying performances of algorithms across different problem instances is crucial. A recent approach based on the concept of "algorithm footprint" identifies both easy and challenging problem instances for an algorithm. However, a major challenge persists --- the lack of comparability among different algorithm footprints for effective benchmarking. This study introduces a solution through a multi-target regression model (MTR), which predicts the performance of multiple algorithms simultaneously, using a common set of problem landscape features. By establishing a common landscape feature set and using a single performance prediction model, not only can algorithm footprints be compared, but the explanations for the predicted algorithm performance derived with Explainable Artificial Intelligence (XAI) techniques can also be analyzed systematically. 
The methodology is applied to a set of three distinct algorithms, revealing their respective strengths and weaknesses on the Black-Box Optimization Benchmarking (BBOB) suite.}, notes = {GECCO-2024 BBSR A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(rasulo:2024:GECCOcomp, author = {Anthony Rasulo and Kate Smith-Miles and Mario Munoz and Julia Handl and Manuel Lopez-Ibanez}, title = {Extending Instance Space Analysis to Algorithm Configuration Spaces}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Carola Doerr and Arnaud Liefooghe}, pages = {147--150}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {algorithm configuration, instance space analysis, explainability, Benchmarking, Benchmarks, Software, Reproducibility: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654264}, size = {4 pages}, abstract = {This paper describes an approach for deriving visual insights about the joint relationship between algorithm performance, algorithm parameters, and problem instance features. This involves the combined analysis and exploration of a 2D instance space, to which instances from some problem space are projected, and a 2D configuration space, to which (algorithm) configurations are projected. Extending on the dimensionality reduction problem solved in Instance Space Analysis, we define an optimisation problem for finding projections to these two spaces, with an interpretable relationship between them. Then, we describe the tools developed for probing those spaces in an investigation of the question: What characterises the algorithm configurations that perform best on a selected group of instances (or vice versa)? We demonstrate the use of these tools on synthetic data with known ground truth.}, notes = {GECCO-2024 BBSR A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(renau:2024:GECCOcomp, author = {Quentin Renau and Johann Dreo and Emma Hart}, title = {Ealain: A Camera Simulation Tool to Generate Instances for Multiple Classes of Optimisation Problem}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Carola Doerr and Arnaud Liefooghe}, pages = {151--154}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {instance-generation, black-box optimisation, software tools, Benchmarking, Benchmarks, Software, Reproducibility: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654299}, size = {4 pages}, abstract = {Artificial benchmark datasets are common in both numerical and discrete optimisation domains. Existing benchmarks cover a broad range of classes of optimisation, but as a general rule have limited value due to their poor resemblance to real-world problems, and generally lack the ability to generate arbitrary numbers of instances. 
In this paper, we introduce Ealain, an instance-generator that creates instances of optimisation problems which require placement of a number of cameras in a domain --- this has many real-world analogies for example in environmental monitoring or providing security in a building. The software provides two types of camera-model and can be used to generate an infinite number of instances of black-box, real-world-like optimisation problems which can be single-objective, multi-objective, multi-fidelity, or constrained. The software is also flexible in that it also permits a range of different objective functions to be defined. Furthermore, generated instances can be solved using either a numerical or discrete encoding of solutions. The C++ library targets fast computation and can be easily plugged into a solver of choice. We summarise the key features of the Ealain software and provide some examples of the type of instances that can be generated for different classes of optimisation problems.}, notes = {GECCO-2024 BBSR A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(rook:2024:GECCOcomp, author = {Jeroen Rook and Holger H. Hoos and Heike Trautmann}, title = {Multi-objective Ranking using Bootstrap Resampling}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Carola Doerr and Arnaud Liefooghe}, pages = {155--158}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {algorithm benchmarking, robust ranking, multi-objective performance measurement, Benchmarking, Benchmarks, Software, Reproducibility: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654436}, size = {4 pages}, abstract = {Benchmarking and related competitions are widely used for assessing and comparing solver performances. However, underlying uncertainty due to the composition of the instance set and bias induced by choosing specific performance criteria is often not sufficiently addressed. Moreover, performance assessment is almost always multi-objective in nature and no objective, totally neutral approach to it exists. 
We build on recent work on robust ranking for single-objective solver performance assessment based on bootstrap resampling and introduce a multi-objective robust ranking extension that is shown to provide new and promising perspectives on existing competition rankings.}, notes = {GECCO-2024 BBSR A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(shmalko:2024:GECCOcomp, author = {Elizaveta Shmalko and Askhat Diveev}, title = {Benchmark Problems for Machine Learning in Control Synthesis}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Carola Doerr and Arnaud Liefooghe}, pages = {2123--2126}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary machine learning, symbolic regression, control, unsupervised learning, Benchmarking, Benchmarks, Software, Reproducibility: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654405}, size = {4 pages}, abstract = {Symbolic regression methods are becoming increasingly popular and they are worthy competitors to neural networks in many tasks. However, symbolic regression methods have the greatest promise in those problems where there is no training sample and it is necessary to search for a solution to a machine learning problem only on the basis of some given general quality criterion. A wide class of such problems includes control synthesis problems. Machine learning control tasks consist in searching for a vector function of control according to some criterion. As a rule, there is no training sample for such tasks, except to approximate the operator's actions. However, this approach is unlikely to satisfy the given criterion. Some symbolic regression methods have already shown good results in control problems. But the complexity of the tasks reveals the need to develop new approaches and modifications of existing methods. In this regard, a benchmark of machine learning control problems is proposed to test symbolic regression methods. Problem formulations and reference solutions are presented.}, notes = {GECCO-2024 BBSR A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(yang:2024:GECCOcomp, author = {Tianxing Yang and Huigen Ye and Hua Xu}, title = {Learning to Generate Scalable {MILP} Instances}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Carola Doerr and Arnaud Liefooghe}, pages = {159--162}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {mixed-integer linear programming, optimization, unsupervised learning, benchmarking, Benchmarks, Software, Reproducibility: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3661481}, size = {4 pages}, abstract = {Large-scale Mixed-Integer Linear Programming (MILP) problems have been efficiently addressed using Machine Learning (ML)-based frameworks, especially ML-based evolutionary optimization frameworks to obtain high-quality solutions.
When addressing real-world MILP problems, ML-based evolutionary optimization frameworks often face challenges in acquiring sufficient instances that belong to the same category. This underscores the need for generators that can autonomously produce MILP problems from existing instances. This paper introduces MILPGen, a novel generative framework for autonomous MILP instance generation. Our key contribution lies in the two-stage problem generation in MILPGen: 1) Node Splitting and Merging, which splits the bipartite graph and tries to reconstruct it; 2) Scalable Problem Construction, which concatenates tree structures to get larger problems. We demonstrate that the instances generated by MILPGen are highly similar to the original problem instances and can effectively enhance the solution effect of the ML-based evolutionary optimization frameworks. Further experiments show that the scaled-up generated instances still retain the problem's structural properties, validating the proposed framework's effectiveness.}, notes = {GECCO-2024 BBSR A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zhao:2024:GECCOcomp, author = {Yifan Zhao and Zhenyu Liang and Zhichao Lu and Ran Cheng}, title = {A Multi-objective Optimization Benchmark Test Suite for Real-time Semantic Segmentation}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Carola Doerr and Arnaud Liefooghe}, pages = {163--166}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {multi-objective optimization, benchmarking, real-time semantic segmentation, Benchmarks, Software, Reproducibility: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654389}, size = {4 pages}, abstract = {As one of the emerging challenges in Automated Machine Learning, the Hardware-aware Neural Architecture Search (HW-NAS) tasks can be treated as black-box multi-objective optimization problems (MOPs). An important application of HW-NAS is real-time semantic segmentation, which plays a pivotal role in autonomous driving scenarios. The HW-NAS for real-time semantic segmentation inherently needs to balance multiple optimization objectives, including model accuracy, inference speed, and hardware-specific considerations. Despite its importance, benchmarks have yet to be developed to frame such a challenging task as multi-objective optimization. To bridge the gap, we introduce a tailored streamline to transform the task of HW-NAS for real-time semantic segmentation into standard MOPs. Building upon the streamline, we present a benchmark test suite, CitySeg/MOP, comprising fifteen MOPs derived from the Cityscapes dataset. The CitySeg/MOP test suite is integrated into the EvoXBench platform to provide seamless interfaces with various programming languages (e.g., Python and MATLAB) for instant fitness evaluations. We comprehensively assessed the CitySeg/MOP test suite on various multi-objective evolutionary algorithms, showcasing its versatility and practicality. 
Source codes are available at https://github.com/EMI-Group/evoxbench.}, notes = {GECCO-2024 BBSR A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(boldi:2024:GECCOcomp, author = {Ryan Boldi and Matthew Fontaine and Sumeet Batra and Gaurav Sukhatme and Stefanos Nikolaidis}, title = {Generating Diverse Critics for Conditioned Policy Distillation}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mary Katherine Heinrich and Emily Dolson}, pages = {167--170}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {quality diversity, reinforcement learning, policy distillation, conditioned policies, Complex Systems: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654429}, size = {4 pages}, abstract = {Quality Diversity Reinforcement Learning (QD-RL) allows for the creation of a large set of behaviorally diverse yet high performing policies. However, these techniques often require the maintenance of a large set of parameters for individual neural network controllers, which have lots of redundancy between them. In this work, we outline and test an approach that uses quality diversity optimization to produce a large archive of small Q-functions that can be used as critics to train a larger, task conditioned, policy. We verify the feasibility of this approach in a maze navigation domain, and find that this method is a promising technique for conditioned policy production.}, notes = {GECCO-2024 CS A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(boldi:2024:GECCOcomp2, author = {Ryan Boldi and Li Ding and Lee Spector}, title = {Solving Deceptive Problems Without Explicit Diversity Maintenance}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mary Katherine Heinrich and Emily Dolson}, pages = {171--174}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {implicit diversity maintenance, objective subaggregation, lexicase selection, MAP-elites, Complex Systems: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654099}, size = {4 pages}, abstract = {Navigating deceptive domains has often been a challenge in machine learning due to search algorithms getting stuck at sub-optimal local maxima. Many algorithms have been proposed to navigate these domains by explicitly maintaining population diversity, such as Novelty Search, MAP-Elites or other so-called Quality Diversity algorithms. In this paper, we present an approach to solve deceptive problems by optimizing a series of defined objectives instead. These objectives can be created by subaggregating the performance of individuals, and optimized by lexicase selection, a technique shown to implicitly maintain population diversity. We compare this method to a commonly used quality diversity algorithm, MAP-Elites, on a set of discrete optimization and reinforcement learning domains. 
We find that lexicase selection with decomposed objectives outperforms MAP-Elites on the deceptive domains that we explore. Furthermore, we find that this technique results in competitive performance on the diversity-focused metrics of QD-Score and Coverage, without explicitly optimizing for these things. We also find that this technique is robust to the choice of subaggregation method and its benefits scale to high dimensional control tasks. This work highlights that diversity does not always need to be explicitly maintained for it to help solve deceptive problems.}, notes = {GECCO-2024 CS A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(nisioti:2024:GECCOcomp, author = {Eleni Nisioti and Erwan Plantec and Milton Montero and Joachim Pedersen and Sebastian Risi}, title = {Growing Artificial Neural Networks for Control: the Role of Neuronal Diversity}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mary Katherine Heinrich and Emily Dolson}, pages = {175--178}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolution, neuroevolution, morphogenesis, development, Complex Systems: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654356}, size = {4 pages}, abstract = {In biological evolution, complex neural structures grow from a handful of cellular ingredients. As genomes in nature are bounded in size, this complexity is achieved by a growth process where cells communicate locally to decide whether to differentiate, proliferate and connect with other cells. This self-organisation is hypothesized to play an important part in the generalisation and robustness of biological neural networks. Artificial neural networks (ANNs), on the other hand, are traditionally optimized in the space of weights. Thus, the benefits and challenges of growing artificial neural networks remain understudied. Building on the previously introduced Neural Developmental Programs (NDP), in this work we present an algorithm for growing ANNs that solve reinforcement learning tasks. We identify a key challenge: ensuring phenotypic complexity requires maintaining neuronal diversity, but this diversity comes at the cost of optimization stability. To address this, we introduce two mechanisms: (a) equipping neurons with an intrinsic state inherited upon neurogenesis; (b) lateral inhibition, a mechanism inspired by biological growth, which controls the pace of growth, helping diversity persist.
We show that both mechanisms contribute to neuronal diversity and that, equipped with them, NDPs achieve comparable results to existing direct and developmental encodings in complex locomotion tasks.}, notes = {GECCO-2024 CS A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(papadopoulos:2024:GECCOcomp, author = {Vassilis Papadopoulos and Guilhem Doat and Arthur Renard and Clement Hongler}, title = {Looking for Complexity at Phase Boundaries in Continuous Cellular Automata}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mary Katherine Heinrich and Emily Dolson}, pages = {179--182}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {lenia, artificial life, alife, complex systems, phases: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654420}, size = {4 pages}, abstract = {One key challenge in Artificial Life is designing systems that display an emergence of complex behaviors. Many such systems depend on a high-dimensional parameter space, only a small subset of which displays interesting dynamics. Focusing on the case of continuous systems, we introduce the 'Phase Transition Finder' (PTF) algorithm, which can be used to efficiently generate parameters lying at the border between two phases. We argue that such points are more likely to display complex behaviors, and confirm this by applying PTF to Lenia, showing it can increase the frequency of interesting behaviors more than two-fold, while remaining efficient enough for large-scale searches.}, notes = {GECCO-2024 CS A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(azerine:2024:GECCOcomp, author = {Abdennour Azerine and Mahmoud Golabi and Ammar Oulamara and Lhassane Idoumghar}, title = {Enhancing Electric Vehicle Charging Schedules: A {Surrogate-Assisted} Approach}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Leslie {Perez Caceres} and Yi Mei}, pages = {183--186}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {electric vehicle, charging scheduling problem, genetic algorithm, linear programming, surrogate models, Evolutionary Combinatorial Optimization, Metaheuristics: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654303}, size = {4 pages}, abstract = {This paper addresses the pressing issue of efficiently scheduling electric vehicle (EV) charging at public stations to alleviate strain on the electrical grid. EV drivers provide their charging needs beforehand, and the scheduler optimizes charger allocation and power distribution to minimize discrepancies in state-of-charge levels at departure. To tackle the complexity of this NP-hard problem, this study proposes a solution framework combining a genetic algorithm with linear programming. Surrogate models are also investigated to expedite problem-solving.
Simulation results demonstrate the effectiveness of these approaches in managing the complexities of EV charging scheduling.}, notes = {GECCO-2024 ECOM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(chen:2024:GECCOcomp2, author = {Qinan Chen and Qiqi Xia and Kaixuan Sun and Jin Wang and Bo Yuan}, title = {Memetic Algorithm for Multi-objective Virtual Network Slice Configuration}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Leslie {Perez Caceres} and Yi Mei}, pages = {187--190}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {virtual network slice configuration, multi-objective optimization, network latency, memetic algorithm, Evolutionary Combinatorial Optimization, Metaheuristics: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654315}, size = {4 pages}, abstract = {Through virtual network slicing, a physical network in 5G can support various vertical applications with different reliability requirements for the end-to-end latency. This paper models the problem of virtual network slice configuration (VNSC) as a multi-objective optimization problem (cost vs. reliability), considering a real-world scenario in which the latency distribution of network nodes is unknown. Instead of evaluating the network end-to-end latency by full Monte Carlo Simulation (MCS), we design a more efficient approximation evaluation method based on MCS and distribution fitting. Furthermore, based on NSGA-II, a multi-objective memetic algorithm for VNSC is proposed by introducing domain knowledge in both population initialization and chromosome mutation processes. The experimental studies on different network architectures demonstrate the superiority of the proposed method, i.e., the total amount of the MCS data can be reduced to 2\%, and the fitness evaluation times can be reduced to 53.3\%-86.4\% on the slice instances of different scales without sacrificing slice performance.}, notes = {GECCO-2024 ECOM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(da-ros:2024:GECCOcomp, author = {Francesca {Da Ros} and Luca {Di Gaspero} and Marie-Louise Lackner and Nysret Musliu and Felix Winter}, title = {Local Search Algorithms for the Oven Scheduling Problem}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Leslie {Perez Caceres} and Yi Mei}, pages = {191--194}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {local search, oven scheduling problem, batch scheduling problem, Evolutionary Combinatorial Optimization, Metaheuristics: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654158}, size = {4 pages}, abstract = {The Oven Scheduling Problem is an NP-hard real-world parallel batch problem arising in electronic component manufacturing. The goal of this problem is to schedule jobs on ovens while minimizing total oven runtime, job tardiness, and setup costs. 
To reduce oven runtime, compatible jobs can be processed simultaneously in batches. Schedules must respect oven eligibility and availability, job release dates, setup times between batches, and oven capacity constraints, as well as compatibility of job processing times and attributes. We propose and fine-tune a set of local search-based methods, including Simulated Annealing, Late Acceptance Hill Climbing, and Tabu Search. A comparative analysis with solutions obtained by exact methods demonstrates the robustness of local search-based methods. We find optimal solutions whenever the optimum is known and significantly improve the solution quality for more than half of the instances. The best local search approaches find high-quality solutions within a short amount of time (i.e., already within 30 seconds), which demonstrates their effectiveness for real-world applications.}, notes = {GECCO-2024 ECOM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(goudet:2024:GECCOcomp, author = {Olivier Goudet and Adrien Goeffon and Frederic Saubion and Sebastien Verel}, title = {Emergence of Strategies for Univariate {Estimation-of-Distribution} Algorithms with Evolved Neural Networks}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Leslie {Perez Caceres} and Yi Mei}, pages = {195--198}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {estimation-of-distribution algorithm, neuro-evolution, quadratic unconstrained binary optimization, Evolutionary Combinatorial Optimization, Metaheuristics: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654319}, size = {4 pages}, abstract = {Our study focuses on the development of new Estimation of Distribution Algorithms (EDAs) with neuro-evolution for pseudo-Boolean optimization problems. We define a strategy for updating the frequency vector at each generation using a neural network, trained by an evolutionary algorithm. To evaluate the effectiveness of our approach, we carried out experiments on instances of the Quadratic Unconstrained Binary Optimization (QUBO) problem of different types.
The automatically discovered algorithm demonstrates its competitiveness with existing EDAs in the literature.}, notes = {GECCO-2024 ECOM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(li:2024:GECCOcomp, author = {Tianyang Li and Ying Meng and Lixin Tang}, title = {Symmetry Parallel Search Strategy For Permutation-related Optimization Problems}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Leslie {Perez Caceres} and Yi Mei}, pages = {199--202}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {Evolutionary Combinatorial Optimization, Metaheuristics: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654204}, size = {4 pages}, abstract = {In this paper, we propose a symmetry parallel search strategy that can be applied to permutation-related combinatorial optimization problems. The proposed symmetry parallel search strategy considers the parity of the permutations and decomposes the solution space into two independent regions, i.e., even and odd permutations. Then, we analyze the relations between the parity of permutations and two representative neighborhood search operators. This allows trajectory-based metaheuristics to keep searching on even or odd permutations without breaking the utility of search operators. For experiments, we take the quadratic assignment problem and two classical trajectory-based metaheuristics as a case study. Experimental results show that the trajectory-based metaheuristics embedded with the symmetry parallel search strategy can achieve a competitive performance.}, notes = {GECCO-2024 ECOM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(phan:2024:GECCOcomp, author = {Tri Phan and Khang Tran and Ngoc Hoang Luong}, title = {Combining Adaptive Large Neighborhood Search with Guided Ejection Search for {Real-World} {Multiple-Vehicle} Pickup and Delivery Problem with Time Windows}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Leslie {Perez Caceres} and Yi Mei}, pages = {203--206}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {pickup and delivery problem, vehicle routing problem, Evolutionary Combinatorial Optimization, Metaheuristics: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654421}, size = {4 pages}, abstract = {The multiple-vehicle pickup and delivery problem with time windows (MV-PDPTW) involves scheduling a fleet of trucks to pick up and deliver orders in a timely manner for as many customers as possible. In this paper, we aim to tackle real-world problem instances where a limited number of vehicles need to handle a large number of orders. We consider a greedy heuristic to construct an initial solution, which is then improved with our customized variants of the Adaptive Large Neighborhood Search (ALNS) and Guided Ejection Search (GES).
The hybridization of ALNS and GES shows its potential in forming efficient schedules that successfully serve many customers compared to running solely ALNS or GES.}, notes = {GECCO-2024 ECOM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(salim:2024:GECCOcomp, author = {Sara Salim and Ismail Ali and Sondoss Elsawah}, title = {A Secure Simulation-optimisation Framework for Fleet Mix Problem: A Defence-based Real-world Application}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Leslie {Perez Caceres} and Yi Mei}, pages = {207--210}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {optimisation-simulation, data security, fleet mix problem, Evolutionary Combinatorial Optimization, Metaheuristics: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654170}, size = {4 pages}, abstract = {This paper presents a secure simulation-optimisation framework tailored for defence scenarios, balancing robust decision-making with data confidentiality. It integrates an enhanced genetic algorithm (GA) for solution generation, a comprehensive simulation model for evaluation, and advanced data security techniques. The framework prioritises robust decision-making and data security, fostering collaboration and setting a standard for secure information sharing. With an emphasis on data confidentiality, it outlines key components, applications to defence fleet mix problems, and potential impacts on secure information sharing. Experimental results show superior performance over alternative algorithms, with average success rates of 3.19\% and 6.34\% in total cost and computational time, respectively, and achieving a 100\% transformation of defence-sensitive data, highlighting its efficacy and security.}, notes = {GECCO-2024 ECOM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(sanchez-diaz:2024:GECCOcomp, author = {Xavier F. C. Sanchez-Diaz and Ole Jakob Mengshoel}, title = {Estimating the Number of Local Optima in Multimodal {Pseudo-Boolean} Functions: Validation via Landscapes of Triangles}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Leslie {Perez Caceres} and Yi Mei}, pages = {211--214}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {pseudo-boolean functions, local optima, multimodality, Evolutionary Combinatorial Optimization, Metaheuristics: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654156}, size = {4 pages}, abstract = {Pseudo-Boolean functions are often multimodal, and it is of interest to find multiple optima. However, the problem of estimating the number of local optima has not been much studied in the combinatorial setting. Since exhaustive enumeration is generally prohibitive, we study an alternative in this paper. 
Our method, which uses the celebrated Birthday Paradox "in reverse," enables us to estimate the number of local optima in fitness landscapes. We study the method analytically and experimentally, using a new synthetic problem, Triangle. This problem allows us to vary the number of optima and their distribution easily but understandably, which enables analytical validation of our experiments. We conclude by discussing how the approach may be applied and extended in the future.}, notes = {GECCO-2024 ECOM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(velazquez-cruz:2024:GECCOcomp, author = {Rodrigo Fernando {Velazquez Cruz} and Carlos Ignacio {Hernandez Castellanos}}, title = {"A Cooperative Multi {Indicator-Based} Ant Colony Optimization Algorithm for the {MOGenConVRP"}}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Leslie {Perez Caceres} and Yi Mei}, pages = {215--218}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {ACO, quality indicators, approximated solutions, VRP, external archivers, Evolutionary Combinatorial Optimization, Metaheuristics: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654416}, size = {4 pages}, abstract = {Using quality indicators to guide the search in multi-objective evolutionary algorithms (MOEAs) is one of the most prominent approaches, since quality indicators incorporate preferences on the Pareto approximation and the diversity of solutions. Recently, there has been an interest in algorithms based on the cooperation of multiple indicators, mostly using MOEAs as a baseline. We propose the Cooperative Multi Indicator-Based Ant Colony Optimization algorithm (cMIBACO), a novel algorithm based on the cooperation of three quality indicators (hypervolume, epsilon+, R2) to improve diversity and avoid premature convergence to a local optimum. This algorithm uses an external archive to obtain the set of approximated solutions. We tested the algorithm against indicator-based ACOs on several instances of the Multi-objective Generalized Consistent Vehicle Routing Problem (MOGenConVRP) and tested for statistical significance.
Our experimental results show that cMIBACO achieves results competitive with the state of the art and opens the door for further research in this direction.}, notes = {GECCO-2024 ECOM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(wang:2024:GECCOcomp2, author = {Xian-Ping Wang and Xin-Xin Xu and Yi Jiang and Sheng Bi and Yan-Chun Liang and Zhi-Hui Zhan}, title = {Distributed Ensemble {Hyper-Heuristic} Algorithm for Aircraft Arrival Sequencing and Scheduling Problem}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Leslie {Perez Caceres} and Yi Mei}, pages = {219--222}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {aircraft arrival sequencing and scheduling, hyper-heuristic, ensemble solution construct, distributed cooperative evolution, Evolutionary Combinatorial Optimization, Metaheuristics: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654179}, size = {4 pages}, abstract = {Aircraft arrival sequencing and scheduling (ASS) is a challenging NP-hard problem, which aims to arrange the order of aircraft landing at an airport to optimize the use of runway capacity and reduce delays. As air traffic continues to grow, airports face the constant challenge of efficiently managing the flow of incoming flights, emphasizing the need for a fast and efficient algorithm. However, existing algorithms are often extremely time-consuming when constructing a near-optimal solution for ASS. To overcome the deficiency in existing algorithms, this paper proposes a fast and efficient distributed ensemble hyper-heuristic algorithm (DEHHA). To quickly construct an optimal solution for ASS, DEHHA employs an ensemble hyper-heuristic-based solution construction (EHHSC) strategy and a distributed cooperative evolution (DCE) strategy. The EHHSC strategy ensembles multiple effective hyper-heuristic optimizers to find diverse solutions and enables communication among the diverse optimizers to effectively find the global optimal solution. The DCE strategy integrates the strengths of distributed computing, which enables multiple hyper-heuristic optimizers to cooperatively evolve in parallel to reduce running time.
Extensive experimental results indicate that DEHHA not only achieves superior performance in terms of solution quality but also requires a mere 42.9\% of the time needed by the state-of-the-art algorithms.}, notes = {GECCO-2024 ECOM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(wu:2024:GECCOcomp, author = {De Ming Wu and Qiang Yang and Chang Jun Zhou and Zhong Long Zheng and Jun Zhang}, title = {Adapted Ant Colony Optimization for {Large-Scale} Orienteering Problem}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Leslie {Perez Caceres} and Yi Mei}, pages = {223--226}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {orienteering problem, ant colony optimization, combinatorial optimization, metaheuristics, Evolutionary Combinatorial Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654270}, size = {4 pages}, abstract = {The orienteering problem (OP) is a challenging route optimization problem with the objective of finding an optimal subset of nodes and the optimal path to visit these nodes so that the total profit under the constraint of the cost is maximized. Since ant colony optimization (ACO) algorithms have shown remarkable performance in solving path planning problems, they are likely promising for solving OP. To verify this, this paper makes the first attempt to adapt five classical ACO algorithms, namely Ant System (AS), Elite Ant System (EAS), Rank based Ant System (ASrank), Min-Max Ant System (MMAS), and Ant Colony System (ACS), to solve OP. To this end, we determine the candidate nodes in the path construction by considering the constraint. To verify the optimization effectiveness of the five ACO algorithms in solving OP, we also make the first attempt to conduct experiments on large-scale OP instances. Experimental results have shown that the five ACO algorithms are very promising for OP, and ASrank obtains the best performance on the large-scale OP instances.}, notes = {GECCO-2024 ECOM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(yelleti:2024:GECCOcomp2, author = {Vivek Yelleti and Vadlamani Ravi and P.
Radha Krishna}, title = {Novelty Detection and Feedback based Online Feature Subset Selection for Data Streams via Parallel Hybrid Particle Swarm Optimization Algorithm}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Leslie {Perez Caceres} and Yi Mei}, pages = {227--230}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {online feature subset selection, particle swarm optimization, hybrid evolutionary algorithm, data streams, Evolutionary Combinatorial Optimization, Metaheuristics: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654298}, size = {4 pages}, abstract = {In a first-of-its-kind study, we propose an online feature selection (OFS) framework for streaming data under the big data paradigm, comprising (i) a parallel hybrid particle swarm optimization (PSO)-based wrapper, (ii) a robust method to handle high-velocity and voluminous datasets, and (iii) two vigilance tests for detecting novelty. Our framework involves a continuous and adaptive learning process, by reducing the number of retrains of the wrapper. Moreover, it is scalable by virtue of the parallelization of the PSO and its variants/hybrids, resulting in quick responses in real-time. We propose BBPSO-L+TNS, a hybrid of bare-bones particle swarm optimization guided by logistic distribution (BBPSO-L) and a threshold-based neighbourhood search (TNS) heuristic, to achieve better exploitation capability and avoid becoming trapped in local optima. The findings demonstrate the robustness of the proposed streaming framework, yielding cost-effective solutions. Further, BBPSO-L+TNS outperformed the baseline algorithms.}, notes = {GECCO-2024 ECOM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zhang:2024:GECCOcomp3, author = {Han Zhang and Qing Li and Xin Yao}, title = {{PEACH:} A {Multi-Objective} Evolutionary Algorithm for Complex Vehicle Routing with {Three-Dimensional} Loading Constraints}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Leslie {Perez Caceres} and Yi Mei}, pages = {231--234}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {split delivery, vehicle routing, three-dimensional loading, evolutionary algorithms, multi-objective optimization, Evolutionary Combinatorial Optimization, Metaheuristics: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654333}, size = {4 pages}, abstract = {The Split Delivery Vehicle Routing Problem with Three-Dimensional Loading Constraints (3L-SDVRP) combines routing and packing challenges with two objectives (minimizing travel distance and maximizing vehicle loading rate). Meta-heuristic algorithms are effective in addressing 3L-SDVRP, with the balance between exploration (searching broadly for new solutions) and exploitation (focusing near known effective solutions) playing a pivotal role in their performance.
However, achieving an optimal balance between exploration and exploitation, especially within the limited computational resources, remains an ongoing challenge. This paper introduces a Pareto-based Evolutionary Algorithm with Concurrent execution of crossover and Hierarchical neighborhood mutation (PEACH) with two novel features. Firstly, a new hierarchical neighborhood mutation is proposed. This mutation employs multiple neighborhood structures to produce diverse offspring from a single parent, thus increasing solution diversity for better exploitation. Additionally, our mutation is hierarchical rather than random, prioritizing mutation for individuals with higher nondomination ranks and guiding the search towards promising regions. Secondly, PEACH applies crossover and mutation concurrently, allowing each individual to undergo either or both processes simultaneously, rather than sequentially. Our experimental results demonstrate PEACH's effectiveness in tackling 3L-SDVRP.}, notes = {GECCO-2024 ECOM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zhao:2024:GECCOcomp2, author = {Rui Zhao and Tianyu Liang and Zhize Wu and Daan {van den Berg} and Matthias Thuerer and Thomas Weise}, title = {Randomized Local Search on the 2D Rectangular Bin Packing Problem with Item Rotation}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Leslie {Perez Caceres} and Yi Mei}, pages = {235--238}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {bin packing, two-dimensional bin packing, randomized local search, RLS, permutations, Evolutionary Combinatorial Optimization, Metaheuristics: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654139}, size = {4 pages}, abstract = {The two-dimensional orthogonal rectangular Bin Packing Problem without orientation (here abbreviated as 2D-BPP) asks us to place, without overlap, sets of rectangular items into as few rectangular bins as possible. The items can have different sizes and can be rotated by 90 degrees. Their edges must be parallel to the edges of the bins. All bins have the same fixed size. We analyze the performance of Randomized Local Search (RLS) on the recently published 2DPackLib benchmark dataset. The RLS works on the space of signed permutations and applies a variant of the Improved Bottom Left heuristic as decoding step. We test seven objective functions that drive the search towards solutions occupying fewer bins. The RLS yields surprisingly good performance when minimizing the number of bins and, at lower priority, the area under the skyline of objects in the bins. 
We provide the complete set of results and all algorithm implementations in an immutable archive.}, notes = {GECCO-2024 ECOM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(pour-hosseini:2024:GECCOcomp, author = {Mohammad Reza Pour-Hosseini and Mahdi Abbasi and Bahman Javadi and Seifeddine Benelghali and Hassan Haghighi and Ehsan Mohammadi-Pasand and Parham Moradi}, title = {Efficient Edge Computing: Harnessing Compact Machine Learning Models for Workload Optimization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {239--242}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {edge computing, internet of things, workload distribution, network optimization, memory constraints, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654307}, size = {4 pages}, abstract = {This paper proposes an innovative lightweight workload distribution model, TinyXCS, specifically designed for traditional edge devices, emphasizing efficiency and compactness. TinyXCS, an online model based on XCS learning classifier systems, demonstrates high levels of performance in reducing delays and energy consumption. Engineered to operate efficiently on conventional edge devices, the model offers a promising solution for optimizing workload distribution while considering memory constraints. Experimental results validate the effectiveness of this model, representing a significant advancement in the quest for streamlined edge computing solutions.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(aki:2024:GECCOcomp, author = {Fuma Aki and Riku Ikeda and Takumi Saito and Ciaran Regan and Mizuki Oka}, title = {{LLM-POET:} Evolving Complex Environments using Large Language Models}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {243--246}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {open-ended evolution, large language models, POET algorithm, evolution gym, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654115}, size = {4 pages}, abstract = {Creating systems capable of generating virtually infinite variations of complex and novel behaviour without predetermined goals or limits is a major challenge in the field of AI. This challenge has been addressed through the development of several open-ended algorithms that can continuously generate new and diverse behaviours, such as the POET and Enhanced-POET algorithms for co-evolving environments and agent behaviour. One of the challenges with existing methods, however, is that they struggle to continuously generate complex environments.
In this work, we propose LLM-POET, a modification of the POET algorithm where the environment is both created and mutated using a Large Language Model (LLM). By fine-tuning an LLM with text representations of Evolution Gym environments and captions that describe the environment, we were able to generate complex and diverse environments using natural language. We found that not only could the LLM produce a diverse range of environments, but compared to the CPPNs used in Enhanced-POET for environment generation, the LLM allowed for a 34\% increase in the performance gain of co-evolution. This increased performance suggests that the agents were able to learn a more diverse set of skills by training on more complex environments.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(andersen:2024:GECCOcomp, author = {Hayden Andersen and Andrew Lensen and Will Browne and Yi Mei}, title = {Interpretable Local Explanations Through Genetic Programming}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {247--250}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, explainable AI, machine learning, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654370}, size = {4 pages}, abstract = {As machine learning models become increasingly prevalent in everyday life, there is a growing demand for explanation of the predictions generated by these models. However, most models used by companies are black-boxes in nature, without the capacity to provide explanations to users. This reduces public trust in these models, and exists as a barrier to adoption of machine learning. Research into providing explanations to users has shown that local explanation techniques provide more acceptable explanations to users than attempting to explain an entire model, as a user often does not need to understand the entirety of a model. This work builds on prior work in the field to produce a competitive method for high-fidelity local explanations utilising genetic programming. Two different data representations targeted towards both users with and without machine learning experience are evaluated. The experimental results show comparable fidelity to the state-of-the-art, while exhibiting more comprehensible explanations due to including fewer features in each explanation.
The method enables decomposable explanations that are easy to interpret, while still capturing non-linear relationships in the original model.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(baeta:2024:GECCOcomp, author = {Francisco Baeta and Joao Correia and Tiago Martins and Penousal Machado}, title = {Exploring Evolutionary Generators within Generative Adversarial Networks}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {251--254}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, evolutionary computation, generative adversarial networks, TGPGAN, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654348}, size = {4 pages}, abstract = {Since their introduction, Generative Adversarial Networks (GANs) have represented the bulk of approaches used in image generation. Before GANs, such approaches used Machine Learning (ML) exclusively to tackle the training problems inherent to GANs. However, in recent years, evolutionary approaches have been making a comeback, not only across the field of ML but in generative modelling specifically. Successes in GPU-accelerated Genetic Programming (GP) led to the introduction of the TGPGAN framework, which used GP as a replacement for the deep convolutional network conventionally used as a GAN generator. In this paper, we delve further into the generative capabilities of evolutionary computation within adversarial models and extend the study performed in TGPGAN to analyse other evolutionary approaches. Similarly to TGPGAN, the presented approaches replace the generator component of a Deep Convolutional GAN (DCGAN): one with a line-drawing Genetic Algorithm (GA) and another with a Compositional Pattern Producing Network (CPPN). Our comparison of generative performance shows that the GA used manages to perform competitively with the original framework. 
More importantly, this work showcases the viability of evolutionary approaches other than GP for the purpose of image generation.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(baikas:2024:GECCOcomp, author = {Epifanios Baikas and Danesh Tarapore and David Thomas}, title = {Evolutionary Data Subset Selection for {Class-Incremental} Learning on {Memory-Constrained} Systems}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {255--258}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {TinyML, supervised learning, pattern recognition and classification, genetic algorithms, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654109}, size = {4 pages}, abstract = {Training Machine Learning classifiers on extreme edge devices with non-volatile memory size {$\le$} 10 MB is challenging because of a) the small number of data examples that can be preserved on-device and b) the dynamic nature of the training dataset caused by the continual collection of new examples. Learning from a stream of data batches, each consisting of examples from a new class, is studied by class-incremental Continual Learning. We address the challenge of training an ML classifier on an extreme edge device that collects examples in a class-incremental fashion and propose a framework for selecting which examples should be preserved in non-volatile memory. We apply a genetic algorithm to identify data subsets with minimal degradation in the top-1 accuracy of a k-Nearest-Neighbors classifier. We evaluate the proposed subset selection method on the MNIST and FashionMNIST datasets and observe top-1 accuracy gains of 26.3 \% and 18.6 \% relative to the mean accuracy obtained by random balanced subsets containing only 0.05 \% of the training set. When the genetic algorithm was applied to a class-incremental setting, it achieved comparable accuracy to random balanced selection with 8\texttimes{} less non-volatile memory in specific cases.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(calabrese:2024:GECCOcomp, author = {Andrea Calabrese and Stefano Quer and Giovanni Squillero and Alberto Tonda}, title = {Towards an Evolutionary Approach for Exploiting Core Knowledge in Artificial Intelligence}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {259--262}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {artificial intelligence, compositionality, core knowledge, evolutionary algorithms, rules, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654230}, size = {4 pages}, abstract = {This paper presents a proof of concept for a novel evolutionary methodology inspired by core knowledge.
This theory describes human cognition as a small set of innate abilities combined through compositionality. The proposed approach generates predictive descriptions of the interaction between elements in simple 2D videos. It exploits well-known strategies, such as image segmentation, object detection, simple laws of physics (kinematics and dynamics), and evolving rules, including high-level classes and their interactions. The experimental evaluation focuses on two classic video games, Pong and Arkanoid. Analyzing a small number of raw video frames, the methodology identifies objects, classes, and rules, creating a compact, high-level, predictive description of the interactions between the elements in the videos.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(chen:2024:GECCOcomp3, author = {Yehang Chen and Yu Zhang and Xinyue Li and Wang Hu}, title = {{Multi-Criterion} Feature Selection Based on Clustering Symbolic Regression}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {263--266}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {feature selection, symbolic regression, clustering, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654288}, size = {4 pages}, abstract = {Feature selection is a highly effective technique for processing high-dimensional datasets. However, existing methods often prioritize the accuracy of constructed models as the main criterion, leading to incomplete and unstable outcomes. In this study, the multi-criterion feature selection algorithm based on clustering symbolic regression (FSSR) is proposed to tackle the issue of simplistic criteria in feature selection. The involvement and sensitivity of features are considered as new criteria of feature selection in FSSR. These criteria can be calculated based on the occurrence frequency and partial derivatives in the expressions. Moreover, Pareto non-dominated sorting is incorporated to fully account for both criteria. Furthermore, a clustering symbolic regression algorithm is proposed to preserve local information during the process. By clustering the training set into subsets, the generalization of expressions is improved to enhance the accuracy of feature selection.
When applied to the creep life dataset of high-temperature nickel-based alloys, a new FSSR algorithm with domain-specific prior knowledge is proposed for producing more accurate and interpretable results in feature selection}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(cook:2024:GECCOcomp, author = {Joshua Cook and Kagan Tumer}, title = {Multidimensional Archive Of The State Space}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {267--270}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {diversity search, MAP elites, robust teaming, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654352}, size = {4 pages}, abstract = {Many real-world multiagent problems that require coordinated behaviors in dynamic environments with potentially dynamic team structures need agents to form robust teams. However, such algorithms often lack the diversity of agent policies needed to provide rich enough experiences to generate truly robust teaming behaviors. Most approaches to training diverse agents address the problem as a fitness evaluation issue, defining an evaluation function that attempts to account for agent diversity. However, these methods struggle to scale to large diverse populations efficiently and often require extensive domain knowledge. In this work, we present Multidimensional Archive of the State Space, MASS, an evolutionary framework that efficiently trains a population of agents to effectively cover the policy space. We show that agents trained using MASS are more diverse and can be used to evolve more robust teaming agents.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(demir:2024:GECCOcomp, author = {Kaan Demir and Bach Nguyen and Bing Xue and Mengjie Zhang}, title = {Evolutionary Sparse Coding and Graph Regularisation for Embedded Multi-label Feature Selection}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {271--274}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary, feature selection, multi-label, sparsity, sparse coding, graph regularisation, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654094}, size = {4 pages}, abstract = {Multi-label datasets often possess hundreds of irrelevant or redundant features that can negatively affect classification performance over multiple co-occuring class labels, necessitating feature selection. Sparsity-based multi-label feature selection has garnered notable attention due to its effectiveness at identifying irrelevant and redundant features. However, most existing sparsity-based feature selection methods are gradient-based and are prone to inconsistency or premature convergence. 
This paper proposes novel evolutionary sparsity-based approaches for embedded multi-label feature selection. The proposed methods can achieve state-of-the-art classification performance.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(feiden:2024:GECCOcomp, author = {Arno Feiden and Biagio Paparella and Jochen Garcke}, title = {Generalizing Diversity with the Signature Transform}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {275--278}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {quality-diversity, robustness, MAP-elites, signature transform, time series analysis, behavioural diversity, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654295}, size = {4 pages}, abstract = {Defining differences is a necessary prerequisite for finding diversity in solutions to a given problem. Diversity, in turn, is a property that is difficult to quantify, but is expected to bring favourable properties. We introduce the signature transform as a general behaviour descriptor to distinguish solutions to control problems specifically in the context of Quality-Diversity and MAP-Elites. We define a robustness score and profile, a structured analysis of the behaviour of agents and populations of agents under a changing environment, as an instance of a beneficial property of diversity. The signature transform integrates well with the CVT-Elites approach and offers a functional generalized diversity measure. The robustness analysis substantiates the abstract diversity induced by distance in signature space through tangible effects and enriches the usual evaluation criteria of diverse populations. The generalization of diversity from handcrafted behaviour descriptors opens the possibility to utilise Quality-Diversity techniques for problems where the alignment of the problem with diversity measures is not obvious.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(hernandez:2024:GECCOcomp, author = {Jose Guadalupe Hernandez and Anil Kumar Saini and Jason H Moore}, title = {Lexidate: Model Evaluation and Selection with Lexicase}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {279--282}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, lexicase selection, AutoML, cross-validation, model evaluation, model selection, model complexity, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654265}, size = {4 pages}, abstract = {Automated machine learning streamlines the task of finding effective machine learning pipelines by automating model training, evaluation, and selection.
Traditional evaluation strategies, like cross-validation (CV), generate one value that averages the accuracy of a pipeline's predictions. This single value, however, may not fully describe the generalizability of the pipeline. Here, we present Lexicase-based Validation (lexidate), a method that uses multiple, independent prediction values for selection. Lexidate splits training data into a learning set and a selection set. Pipelines are trained on the learning set and make predictions on the selection set. The predictions are graded for correctness and used by lexicase selection to identify parent pipelines. Compared to 10-fold CV, lexicase reduces the training time. We test the effectiveness of three lexidate configurations within the Tree-based Pipeline Optimization Tool 2 (TPOT2) package on six OpenML classification tasks. In one configuration, we detected no difference in the accuracy of the final model returned from TPOT2 on most tasks compared to 10-fold CV. All configurations studied here returned similar or less complex final pipelines compared to 10-fold CV.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(herrera-sanchez:2024:GECCOcomp, author = {David Herrera-Sanchez and Hector-Gabriel Acosta-Mesa and Efren Mezura-Montes and Aldo Marquez-Grajales}, title = {Shifting Color Space for Image Classification using Genetic Programming}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {283--286}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, color space, image classification, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654430}, size = {4 pages}, abstract = {Image classification is an important application in different areas of computer vision. Genetic Programming (GP) has demonstrated an excellent capability to deal with this task. However, many approaches transform images to grayscale due to the computational cost of working with color spaces. Therefore, possible significant image features for classification could be discarded early. Consequently, our proposal incorporates features extracted from different color spaces for image classification through GP in three datasets. Furthermore, the GP's flexibility allows the images' color space conversion during the evolutionary process. According to the dataset, the final solution determines whether a specific color space was used or if it was necessary to change to another to extract meaningful features from the images.
The results demonstrate that changing the color space in some data sets was necessary to achieve competitive classification accuracy.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(kelly:2024:GECCOcomp, author = {Luke Kelly and Martin Masek and Chiou Peng Lam}, title = {Behaviour Discovery in {Real-Time} Strategy Games using Cooperative Co-evolution with Dynamic Binary Tree Decomposition}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {287--290}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {cooperative co-evolution, dynamic decomposition, binary tree, real-time strategy games, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654222}, size = {4 pages}, abstract = {In cooperative co-evolutionary approaches, a larger global problem is typically decomposed into multiple sub-components, with solutions concurrently evolved for each. Problem decomposition commonly occurs prior to the execution of the algorithm, but this assumes that the optimal decomposition is already known. However, the optimal decomposition is typically unknown for problems with a large number of components, such as multi-agent systems featuring many agents, where the challenge of decomposing the problem grows with the scale of the number of agents involved. In this paper, we propose a dynamic decomposition scheme based on determining optimal groupings of agents, whilst simultaneously evolving the agent behaviour, specifically addressing scalability challenges inherent in multi-agent systems featuring a large number of agents. The approach was evaluated in a real-time strategy game by evolving the behaviour of a heterogeneous team of agents representing combat units. Four scenarios were investigated featuring an increasingly larger number of agents. We compared our approach against two cooperative co-evolutionary algorithms involving commonly used static decomposition schemes. 
The proposed dynamic decomposition approach matched or exceeded the performance of these two static schemes across all scenarios.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(kelly:2024:GECCOcomp2, author = {Luke Kelly and Martin Masek and Chiou Peng Lam and Brandon Abela and Ashu Gupta}, title = {Evolving Visual Counterfactual Medical Imagery Explanations with Cooperative Co-evolution using Dynamic Decomposition}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {291--294}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {cooperative co-evolution, dynamic decomposition, explainable AI, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654224}, size = {4 pages}, abstract = {Explainable AI (XAI) is becoming increasingly vital in healthcare, particularly in understanding decisions made by complex, black-box models in medical diagnostics. Amid discussions on balancing AI's predictive accuracy with interpretability, 'post-hoc' XAI methods are a potential solution, offering interpretability after model training without compromising diagnostic precision. Counterfactual explanations are one such 'post-hoc' XAI technique, where these explanations modify input data, like features in an X-ray image, to see how such changes affect AI image-driven diagnoses, offering insights into the model's reasoning. However, the complexity of medical image data poses challenges in generating these counterfactuals. To address this challenge, we propose a novel Cooperative Co-evolution approach with dynamic decomposition for explainability in AI for medical diagnostics by evolving visual counterfactual explanations for medical images without requiring preliminary assumptions about image decomposition. The method, evaluated using MNIST and Diabetic Foot Osteomyelitis datasets and benchmarked against a genetic algorithm, was capable of generating counterfactual explanations for medical images, showing advantages with larger images.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(kim:2024:GECCOcomp, author = {Jingeun Kim and Yourim Yoon}, title = {Efficient Pruning of {DenseNet} via a {Surrogate-Model-Assisted} Genetic Algorithm}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {295--298}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {deep learning, genetic algorithm, neural networks, surrogate model, fitness approximation, model compression, pruning, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654409}, size = {4 pages}, abstract = {In recent years, convolution neural networks have made remarkable progress in computer vision. 
These networks have a large number of parameters, which are required to be limited in resource-constrained environments. To solve this problem, various lightweight methods have been proposed, among which network pruning is known as a combinatorial optimization problem. Heuristic algorithms are known to be more efficient than exhaustive search in solving such combinatorial optimization problems. Therefore, this paper uses a surrogate-model-assisted genetic algorithm (SMA-GA) for network pruning. The DenseNet-BC (k=12) model was used as the baseline model. A multi-dimensional encoding scheme is used for the genetic algorithm, and the fitness value was approximated through a surrogate model to explore a more extensive search space. On the CIFAR-10 and CIFAR-100, the error rate and the number of parameters resulting from SMA-GA were compared with the baseline model. For CIFAR-10, the number of parameters was reduced by up to 36.25\%, and the error rate was reduced by 0.39\%. For CIFAR-100, the number of parameters was reduced by up to 22.5\%, and the error rate was reduced by 0.91\%. These results demonstrate that the proposed method effectively reduces the number of parameters with a negligible impact on the error rate.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(pandey:2024:GECCOcomp, author = {Suraj Kumar Pandey and Shivashankar B Nair}, title = {Onboard Class Incremental Learning for {Resource-Constrained} scenarios using Genetic Algorithm and {TinyML}}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {299--302}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {class incremental learning, neural networks, genetic algorithms, TinyML, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654392}, size = {4 pages}, abstract = {Deploying Machine Learning (ML) models in real-world settings over resource-constrained edge devices has always been a challenging task. While TinyML tackles this issue to an extent, by mostly using pre-trained Deep Learning (DL) models, the static nature of such models renders them ineffective for non-stationary data. A model having a low memory footprint that could allow onboard Class Incremental Learning (CIL) so as to accommodate data from new classes and also avoid catastrophic forgetting, is thus, the need of the day. The work described in this paper endeavours to meet this need by providing a method that utilises TinyML to accommodate a DL model onboard a resource-constrained device. To enable onboard CIL over the DL model, the method leverages Latent Replays (as exemplars) and a Genetic Algorithm (GA) to create a multi-fitness landscape that facilitates the inclusion of new class data, suppresses catastrophic forgetting and keeps a check on the quality of exemplars. 
Experiments and comparisons conducted for a gesture recognition task using time-series data with the proposed method deployed on a microcontroller, show the effectiveness of augmenting TinyML DL models with the GA for onboard CIL.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(paetzel:2024:GECCOcomp, author = {David Paetzel and Richard Nordsieck and Joerg Haehner}, title = {Length-niching Selection and Spatial Crossover in Variable-length Evolutionary Rule Set Learning}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {303--306}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithm, variable-length representation, metaheuristic rule set learning, learning classifier systems, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654308}, size = {4 pages}, abstract = {We explore Metaheuristic Rule Set Learners with variable-length encodings for regression tasks by performing a first benchmark of two variable-length operators that have not yet seen use in this context: Spatial crossover is a well-known operator which seems to be a natural fit for these systems since it is meant to promote building blocks in continuous parameter spaces like the ones defined by interval-based rule conditions. Length-niching selection is a recently proposed operator that promotes population diversity with respect to solution length which is meant to prevent some forms of premature convergence. We perform comparisons with other established operators (i. e. tournament selection and cut-and-splice crossover) within a simplistic GA that uses the corrected Akaike Information Criterion as a fitness measure. The 54 learning tasks considered are synthetic and highly likely to be learnable by the algorithms considered. While all variants of the GA tested perform similarly in terms of Mean Absolute Error after a fixed number of iterations, with respect to solution complexity (i. e. 
number of rules in the best found solution), not using any crossover seems to outperform both crossover operators tested.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(rangu:2024:GECCOcomp, author = {Gayathri Rangu and Shivashankar Nair}, title = {Immuno-inspired Selective Aggregation for Decentralized Federated Deep Reinforcement Learning}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {307--310}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {federated learning, deep reinforcement learning, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654292}, size = {4 pages}, abstract = {Conventional approaches to Federated Deep Reinforcement Learning (FDRL) often mandate the participation of all the associated devices and perform indiscriminate aggregation of the models. This can, at times, culminate in a low-performance global model being pumped back to the devices causing a setback in the performances of all the local models. Aggregation should thus, be performed judiciously based on the model performance. Unlike offline Reinforcement Learning (RL) setups, when datasets and ground truths are unavailable and data needs to be gathered in real-time, defining a suitable metric for the model performance, can be challenging. In this paper, we propose a novel Immuno-inspired approach for Selective Aggregation suitable for decentralized FDRL (dFDRL) that can act as a metric for the performance of a model based on which the decision to aggregate could be made.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(sinha:2024:GECCOcomp, author = {Nabangshu Sinha and Carlo Lucheroni}, title = {Time-varying Echo State Networks: Harnessing Dynamic Parameters for Robust {Time-Series} Analysis}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {311--314}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {neural networks, time series forecasting, echo state networks, dynamic models, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654425}, size = {4 pages}, abstract = {This work presents a novel variant of Echo State Network (ESN) known as Time-Varying Echo State Network (TV-ESN) and conducts a comprehensive comparative analysis with the standard ESN model. TV-ESN introduces dynamic variations in the leaking rate (a) and spectral radius (rho) over time, offering adaptability to diverse temporal patterns. The evaluation is performed on benchmark datasets, including the Mackey Glass system and actual electricity demand in the Spanish Electricity Market. 
Results indicate that TV-ESN achieves superior forecasting performance, as evidenced by lower Normalized Root Mean Square Error (NRMSE) and Mean Absolute Error (MAE) compared to the conventional ESN. Moreover, TV-ESN exhibits enhanced stability in forecasting. The findings suggest the potential of TV-ESN in capturing rich temporal characteristics.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(uwano:2024:GECCOcomp, author = {Fumito Uwano and Will Neil Browne}, title = {Cognitive Learning System for Sequential Aliasing Patterns of States in Multistep {Decision-Making}}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {315--318}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {learning classifier system, building block, perceptual aliasing, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654110}, size = {4 pages}, abstract = {Perceptual aliasing is a cognitive problem for a learning agent where the robot cannot distinguish its state via its immediate observations, leading to poor decision-making. Previous work addresses this issue by storing the agent's path to learn the optimal policy. In particular, FoRsXCS utilises a fundamental and unique path to identify and disambiguate all aliased states and learn optimal policies in an environment with aliased states. However, it is hard to identify the aliased states in sequential aliasing patterns of states where the aliased states occur sequentially within a regular pattern. This work proposes a new cognitive learning system to identify such sequential aliasing patterns of states by extending FoRsXCS. The experimental results show that the proposed system performs equal to or greater than the existing systems in nine mazes for navigation tasks and significantly outperforms existing techniques in mazes with sequential aliasing patterns. Concretely, the proposed method improves its performance by 0.48 steps compared with FoRsXCS.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(vandergrift:2024:GECCOcomp, author = {Matthew Vandergrift and Ting Hu}, title = {{ATOMIC:} an Interpretable Clustering Method Based on Data Topology}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {319--322}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {interpretable clustering, clustering methodology, explainable artificial intelligence, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654271}, size = {4 pages}, abstract = {State-of-the-art clustering algorithms are well equipped to partition a dataset, but provide no insight into their process for selecting a particular partition. 
As such clustering strategies which work in an interpretable manner, i.e., in a way that can be understood, are desired. Current explainable clustering approaches focus mainly on explaining k-means clustering, which limits their scope to problems where clusters are spherical and convex. Additionally, many of these algorithms struggle to produce explanations on high dimensional data due to their computational complexity. We propose an interpretable clustering methodology which addresses these challenges. Our algorithm, ATOMIC: Analysis of Topology Oriented Method for Interpretable Clustering, is designed to answer the question "why does my data fit into distinct clusters?". This is done by identifying a set of variables that are responsible for isolating a cluster of data-points from the rest in the dataset. To locate partitions which are defined by specific variables we utilize Novelty Search with Local Competition. ATOMIC relies on basic concepts from topology to partition the dataset. We test our algorithm on well-studied high-dimensional datasets, along with performance comparisons to state-of-the-art clustering methodologies. We compare our algorithm in terms of interpretability to other interpretable and explainable clustering methodologies.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(yatsu:2024:GECCOcomp, author = {Naoya Yatsu and Hiroki Shiraishi and Hiroyuki Sato and Keiki Takadama}, title = {Generating {High-Dimensional} Prototypes with a Classifier System by Evolving in Latent Space}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {2127--2130}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {learning classifier systems, supervised classifier system, prototype generation, data mining, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654248}, size = {4 pages}, abstract = {Prototype Generation mainly generates classifiable instances in the observation space but often faces the curse of dimensionality as it directly searches high-dimensional spaces with metaheuristics such as genetic algorithms. To mitigate this problem, this paper proposes a novel PG method called kNNUCS-PG w/ LS-OvL. In detail, the proposed method generates high-dimensional prototypes using LS-OvL, which indirectly learns prototypes in the observation space by evolving in latent space for kNNUCS-PG, one of the sUpervised Classifier Systems with a point representation representing a prototype. 
Intensive experiments on the MNIST and fashion-MNIST datasets have revealed the following implications: (1) the classification accuracy of kNNUCS-PG w/ LS-OvL is higher than that of the conventional prototype generation methods, showing a statistically significant difference; (2) the number of prototypes of kNNUCS-PG w/ LS-OvL is smaller than that of kNNUCS w/o LS-OvL; and (3) based on (1) and (2), the indirect prototype generation through dimensionality reduction by VAE is effective for generating high-dimensional prototypes.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(yu:2024:GECCOcomp2, author = {Bai-Jiang Yu and Feng-Feng Wei and Tai-You Chen and Wei-Neng Chen}, title = {An Improved Evolutionary Reinforcement Learning Algorithm for {UAV} Online Target Tracking}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {323--326}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {unmanned aerial vehicle systems, target tracking, evolutionary reinforcement learning, reinforcement learning, evolutionary algorithms, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654324}, size = {4 pages}, abstract = {Target tracking for unmanned aerial vehicles (UAVs) is significant in a variety of applications and has high research value. Due to the poor capability of responding to deceptive reward signals and lack of diverse exploration, common reinforcement learning (RL) has limitations in real-time decision-making tasks while evolutionary algorithms (EAs) can compensate for these shortcomings by utilizing fitness metrics and strong exploration ability. In this work, we propose an algorithm named improved evolutionary reinforcement learning (IERL) for online UAV target tracking. Firstly, a realistic UAV online target tracking problem is formulated, considering both the velocity change constraints and sparse reward. Based on this problem formulation, an improved selection operator and an improved interaction setting are proposed to achieve higher individual selection efficiency and higher optimization efficiency, respectively.
Simulation results show that the proposed algorithm achieves better performance than the comparison algorithm in the sparse reward tracking tasks.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zhou:2024:GECCOcomp, author = {Yuheng Zhou and Haopu Shang and Yu-Chang Wu and Chao Qian}, title = {{Instance-Label} Based {Multi-Label} Active Learning by Evolutionary {Multi-Objective} Optimization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Jean-Baptiste Mouret and Kai Qin}, pages = {327--330}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {multi-label classification, active learning, multi-objective optimization, evolutionary learning, Evolutionary Machine Learning: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654388}, size = {4 pages}, abstract = {Multi-label active learning has garnered significant attention due to its potential to make use of unlabeled data with limited labeling budget by selectively querying the most valuable unlabeled data. As querying an instance with all potential labels is costly especially in multi-label classification, the instance-label based methods, which pair an instance with one potential label as a single unit for querying, has emerged and become prevailing. However, how to properly balance different aspects of selection like uncertainty, diversity and label association under instance-label pair scenarios still remain underexplored. In this work, we propose to formulate it as a multi-objective optimization problem, and design a novel multilabel active learning method based on evolutionary multi-objective optimization, which iteratively selects batches of instance-label pairs for labeling by optimizing uncertainty, diversity, and label association simultaneously. Experimental results on benchmark datasets demonstrate that our proposed method can outperform state-of-the-art instance-label based active learning methods.}, notes = {GECCO-2024 EML A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(ghodsi:2024:GECCOcomp, author = {Siamak Ghodsi and Sahar Tahmasebi and Mahdi Jalili and Parham Moradi}, title = {{Many-Objective} Evolutionary Optimization using Density Peaks Scoring Selection Strategy}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {331--334}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {many-objective optimization, density peaks clustering, pareto front, non-dominated strategy, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654217}, size = {4 pages}, abstract = {In this paper we propose a many-objective optimization method tailored to efficiently address challenges posed by problems with high numbers of objectives. 
Our method aims to generate solutions that exhibit both convergence and diversity across the objective space. Moreover, a fast non-dominated sorting procedure is employed to ensure the convergence of the evolutionary population. To maintain the diversity of solutions, the method utilizes a novel scoring strategy based on both local and global densities of individuals in the solution space. It selects a population in which each member demonstrates a high local density while being appropriately spaced apart from the others to ensure diversity. The results of experiments on two widely used benchmarks against six popular baselines show that the proposed method outperforms its competitors.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(gomez-lopez:2024:GECCOcomp, author = {Juan Carlos Gomez-Lopez and Daniel Castillo-Secilla and Jesus Gonzalez}, title = {Improving the Performance of {EA-based} Multi-population Models for Feature Selection Problems by Reducing the Individual Size in the Initial Population}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {335--338}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary procedures, energy-aware computing, feature selection, high-dimensional data, EEG classification, multi-population models, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654424}, size = {4 pages}, abstract = {Multi-population models based on evolutionary algorithms have been successfully applied to countless feature selection problems. One of the most relevant aspects is the initialization of the individuals that form the initial population, where any bias should be avoided. Usually, the initial individuals are randomly generated with the aid of the pf hyperparameter (the selection probability for any feature when generating an individual of the initial population). pf is usually set to 0.5 (the same chance of selecting or not selecting any feature). However, different values for this hyperparameter may influence the results finally found and the execution time and energy consumed by the algorithm. This paper analyzes the behavior of a multi-objective and multi-population procedure when setting pf to different values for several datasets.
The results show that there is an optimal value of pf for each dataset that speeds up the process without affecting the quality of the solutions, in addition to reducing the size of the final subset of features in some cases.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(heidari:2024:GECCOcomp, author = {Arash Heidari and Sebastian {Rojas Gonzalez} and Tom Dhaene and Ivo Couckuyt}, title = {Lower Confidence Bound for Preference Selection in Interactive {Multi-Objective} Optimization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {339--342}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {bayesian optimization, multi-objective optimization, interactive optimization, preference learning, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654317}, size = {4 pages}, abstract = {In multi-objective optimization, the goal is to find the non-dominated or Pareto-optimal set that reveals the optimal trade-offs among the conflicting objectives. Conventionally, the Decision-Maker (DM) selects their preferred solution from this set post-optimization. Evidently, this approach necessitates the computation of numerous non-dominated solutions, incurring high computational costs when the evaluation of the objectives is expensive. To address these challenges, interactive optimization offers a potentially more efficient approach: sequential interactions with the DM during the optimization process, enabling the judicious allocation of the optimization budget by guiding the process toward the most desirable regions of the Pareto front. In this study, we propose to exploit the well-known Lower Confidence Bound acquisition function in Bayesian optimization, to interactively estimate the DM's preferred solution in a data-efficient manner, even in scenarios where the DM experiences uncertainty in their decision-making process.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(liang:2024:GECCOcomp, author = {Qingwei Liang and Shu-Chuan Chu and Pei-Cheng Song and Jia Zhao and Zne-Jung Lee and Jeng-Shyang Pan}, title = {A Multi-objective Evolutionary Algorithm based on Hierarchical Grouping for Large-scale Multi-objective Optimization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {343--346}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {differential grouping, decomposition, multi-objective optimization, large-scale optimization, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654126}, size = {4 pages}, abstract = {Variable decomposition have been widely applied to large-scale multi-objective problems. 
Existing studies usually consume a large amount of computational resources in the decomposition phase. To this end, this paper proposes a multi-objective evolutionary algorithm based on hierarchical grouping (MOEA-HG). The algorithm contains a decomposition phase and an optimization phase. In the decomposition phase, diversity variables and convergence variables are first identified. Secondly, the concept of contribution is introduced. Convergence variables with equal contribution to the optimization objectives constitute a subcomponent. In the optimization stage, collaborative optimization is proposed to deal with convergence subcomponents and diversity subcomponents separately. Experimental results show that MOEA-HG consumes less computational resources in identifying variable interactions than other decomposition-based MOEAs. Furthermore, MOEA-HG has significant advantages over the five state-of-the-art MOEAs in terms of optimization performance.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(liu:2024:GECCOcomp, author = {Likun Liu and Ryosuke Ota and Takahiro Yamamoto and Naoki Hamada and Daisuke Sakurai}, title = {Visualization of Multiobjective Multimodal Benchmarking Based on Basin Connectivity}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {347--350}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {multiobjective optimization, visualization, benchmarking, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654190}, size = {4 pages}, abstract = {Visualization is useful for understanding the behavior of Multiobjective Evolutionary Algorithms (MOEAs). However, it has been challenging to investigate how an MOEA may be trapped by local Pareto optima, in which case some individuals stay inside a "valley" (also known as basins of attraction). To address this, we propose a novel visualization that projects the high-dimensional basins in a 2D plot, in which the individuals of benchmarked MOEAs for user-specified generations are visible. For this, we target specialized benchmark problems known as the Benchmarking Based on Basin Connectivity (3BC), which explicitly models basins of attraction and thus allows us to precompute the necessary geometry as what is called the Reeb space. Our demonstration shows that the proposed visualization displays basins in benchmark problems as intended and, reveals the differences in MOEAs' behaviors in response to the basins. 
The source code of our system is publicly available at https://github.com/dsakurai/benchmark-visualizer.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(mai:2024:GECCOcomp, author = {Ruitao Mai and Yifeng Qiu and Wenji Li and Zhaojun Wang and Biao Xu and Wei Chen and Jiafan Zhuang and Yun Li and Zhun Fan}, title = {Expensive Constrained Multi-objective Evolutionary Algorithm with Pareto Set Learning}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {351--354}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {expensive constrained multi-objective optimization, pareto set learning, push and pull search, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654144}, size = {4 pages}, abstract = {Expensive constrained multi-objective optimization problems (ECMOPs) necessitate the acquisition of feasible optimal solutions within a limited number of evaluations. To address these challenges, we introduce a neural network-based Pareto set learning method. This method enhances search efficiency by learning the relationship between weight vectors in the objective space and their corresponding Pareto optimal solutions, thus enabling direct prediction of solutions within the Pareto set. Additionally, we propose a push and pull search algorithm based on Pareto set learning (PPS-PSL) for ECMOPs. This algorithm operates in two stages: learning unconstrained Pareto sets (UPS) in the push stage and constrained Pareto sets (CPS) in the pull stage, while employing evolutionary algorithms to guide the population away from local optima. Comparative experimental results, conducted on fourteen benchmark problems against six comparison algorithms, demonstrate the superiority of our proposed approach.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(nickelson:2024:GECCOcomp, author = {Anna Nickelson and Kagan Tumer}, title = {Redefining the Behavior Space for {Multi-Objective} {MAP-Elites}}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {355--358}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {quality diversity, multi-objective, map-elites, evolutionary learning, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654351}, size = {4 pages}, abstract = {Autonomous agents are increasingly deployed in complex real-world environments where they must balance multiple goals and adapt to novel scenarios. Unfortunately, learning on the fly to handle new situations can produce dangerous or unstable policies.
Creating a diverse set of pre-trained policies overcomes this problem, and evolving those policies using multiple objectives increases their applicability in many domains. Multi-Objective Map Elites (MOME) addresses this issue by searching for a population of policies that are both high performing on a set of objectives as well as diverse across a set of defined behavior metrics, referred to as the Behavior Descriptor (BD). However, the characteristics of the BD that enhance learning and improve search across the behavior and multiobjective spaces in an Reinforcement Learning (RL) setting are still relatively unknown. This work investigates qualities of the BD for MOME that improve search for good policies, in turn, enabling better decision-making. We show that redefining the behavior space can improve coverage of the multi-objective space by up to 36\%.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(nitschke:2024:GECCOcomp, author = {Geoff Nitschke and Bilal Aslan and Flavio Correa {Da Silva}}, title = {{Multi-Objective} Evolution for Chemical Product Design}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {359--362}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary multi-objective optimization, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654422}, size = {4 pages}, abstract = {The design of chemical products requires the optimization of desired properties in molecular structures. Traditional techniques are based on laboratory experimentation and are hindered by the intractable number of alternatives and limited capabilities to identify feasible molecules and either test or infer their properties for optimization. Computational techniques based on deep learning and multi-objective evolutionary optimization have spurred chemical product design, but the definition of appropriate metrics to compare techniques is challenging. We suggest the adoption of two complementary assessments to account for quantitative as well as qualitative features of different techniques, and then test our proposed assessments by comparing two heuristics to build new generations of molecular candidates, termed respectively, direct correlation and extended search.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(saborido:2024:GECCOcomp, author = {Ruben Saborido and Ana B. 
Ruiz and Sandra Gonzalez-Gallardo and Mariano Luque and Antonio Borrego}, title = {Composite Quality Indicators to Evaluate the Performance of Population-based Multiobjective Optimization Algorithms}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {363--366}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {quality performance indicators, composite indicators, population-based multiobjective optimization algorithms, aspiration and reservation levels, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654201}, size = {4 pages}, abstract = {The performance of population-based multiobjective optimization can be evaluated using quality indicators (QIs) assessing the quality of the approximation set generated according to convergence, spread, and uniformity (the combination of the last two is known as diversity). Since not all QIs can capture all these properties, we propose to aggregate existing QIs into a single measure informing about the algorithm's performance from a more general perspective. To synthesize the desired QIs, we build three composite quality indicators (CQIs) based on the double reference point approach (named DRP-based CQIs): weak, strong, and mixed CQIs. Our approach enables using desirable ranges for the values of the aggregated QIs, defined by so-called aspiration and reservation levels, which allows us to know which algorithms are performing better, within, or worse than the desired limits. Each DRP-based CQI enables a different compensation degree among the QIs, and the joint use of the three of them permits a deep insight into the algorithms' performance. Finally, we demonstrate the benefits of our proposal when comparing a large number of population-based algorithms on three-, five-, and eight-objective optimization problems.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(shahane:2024:GECCOcomp, author = {Ananta Shahane and Niki {Van Stein} and Yingjie Fan}, title = {A Corridor Model Evolutionary Algorithm for Fast Converging Green Vehicle Routing Problem}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {2131--2134}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithm, evolutionary algorithm, corridor model, green vehicle routing problem, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654323}, size = {4 pages}, abstract = {In this paper, we introduce an innovative evolutionary algorithm for the Green Vehicle Routing Problem (GVRP). We use the relative location of the customers with respect to the depot to estimate whether a mutation is exploitation or exploration. 
By introducing a probabilistic corridor model, our mutation strategy converges faster to optimal solutions compared to random mutation strategies in existing research. We then introduce our evolutionary algorithms that use this mutation strategy to reduce the number of hyper-parameters to two or three. We test our algorithms across 92 benchmarks. Results show that our algorithms are robust and efficient.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(tagawa:2024:GECCOcomp, author = {Yudai Tagawa and Hernan Aguirre and Kiyoshi Tanaka}, title = {Simple Distributed Bit Climber for Many-objective Optimization of Binary Epistatic Problems}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {367--370}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {multi-objective optimization, distributed bit climber, solution hypervolume, scalarizing function, MNK-landscapes, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654378}, size = {4 pages}, abstract = {We study a distributed bit climbing algorithm for many-objective optimization of binary epistatic problems. This algorithm decomposes the many-objective problem into a minimum number of single-objective problems, specified by the original evaluation functions and one additional scalarizing function that computes the solution hypervolume. Random bit climbers optimize separately the single-objective functions until they reach a local optimum and restart their search from a bounded population of non-dominated solutions collected from the solutions generated by all climbers. In this work, we study the effects on search performance of bounding the non-dominated population to support the search, two methods to truncate the population, one based on crowding distance and the other one on weighted sum scalarizing functions. Also, we study the importance of the solution hypervolume as a scalarizing function to explore the central regions of objective space. 
We evaluate the method on subclasses of epistatic problems using MNK-landscapes and compare with MOEA/D and NSGA-III, showing that the simpler distributed bit climber is superior to the other MOEAs in all but one subclass of problems.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(taylor:2024:GECCOcomp, author = {Kendall Peter Taylor and Huong Ha and Minyi Li and Xiaodong Li and Jeffrey Chan}, title = {Accelerated Bayesian Preference Learning for Efficient Evolutionary Multi-objective Optimisation}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {371--374}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {multi-objective optimisation, interactive evolutionary algorithms, bayesian optimisation, preference learning, active learning, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654236}, size = {4 pages}, abstract = {Optimising multi-objective problems using evolutionary algorithms often results in many trade-off solutions due to conflicting objectives. It is then a daunting task for the end user to select a solution for implementation. Progressive elicitation of preferences during optimisation helps ameliorate this problem by directing the search toward regions of interest and away from undesirable solutions. We propose an approach called accelerated Bayesian preference learning (ABPL), which substantially reduces the number of queries needed to find preferred solutions and minimises expensive algorithm evaluations. We identify promising solutions exhibiting similar preference characteristics from previous data and warm start the preference model. We propose to use the newly acquired information during the optimisation to find additional solutions and present them to the user in conjunction with suggestions from BO.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(tomczyk:2024:GECCOcomp, author = {Michal Tomczyk and Milosz Kadzinski}, title = {Interactive tool for visualizing the comprehensive performance of evolutionary multi-objective algorithms applied to problems with two or three objectives}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {375--378}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary multi-objective optimization, visualization, robustness analysis, software development, interactive tools, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654237}, size = {4 pages}, abstract = {The performance of evolutionary algorithms for multi-objective optimization is typically assessed by considering only the final populations. This is troublesome for the following reasons. 
First, it ignores most solutions constructed throughout the evolution, thus not portraying overall progression. Second, evolutionary methods are susceptible to randomness. Therefore, the results obtained from a single test run are unreliable. Third, these methods are complex algorithms that can be evaluated from many perspectives, not just by assessing the qualities of the final solutions they present. Overcoming these issues motivated the development of our novel visualization tool that accounts for robustness in assessment. It supports both 2D and 3D visualization, and, in the case of the latter, it introduces interactivity, allowing the inspection of presented results in a manner most suited to the user. The developed software is an inherent part of this paper and can be downloaded and re-used by researchers to foster their research.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(wang:2024:GECCOcomp3, author = {Xueqing Wang and Jinhua Zheng and Juan Zou and Zhanglu Hou and Shengxiang Yang and Yuan Liu}, title = {A Dynamic Preference-driven Evolutionary Algorithm for Solving Dynamic Multi-objective Problems}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {379--382}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {dynamic multi-objective optimization, evolutionary algorithm, inverse model, preference information, reference points, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654148}, size = {4 pages}, abstract = {Considering the decision-maker's preference information in static multi-objective optimization problems (MOPs) has been extensively studied. However, incorporating dynamic preference information into dynamic MOPs is a relatively less explored area. This paper introduces a preference information-driven DMOEA and proposes a preference-based prediction method. Specifically, a preference-based inverse model is designed to respond to the time-varying preference information, and the model is used to predict an initial population for tracking the changing ROI. Furthermore, a hybrid prediction strategy, that combines a linear prediction model and estimation of population manifolds in the ROI, is proposed to ensure convergence and distribution of population when the preference remain constant. 
Experimental tests on 19 common test problems show that the proposed algorithm has significant advantages over existing representative DMOEAs.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(yamada:2024:GECCOcomp, author = {Yutaro Yamada and Kento Uchida and Shinichi Shirakawa}, title = {How to Make {Multi-Objective} Evolutionary Algorithms Invariant to Monotonically Increasing Transformation of Objective Functions}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {383--386}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {multi-objective optimization, invariance, monotonically increasing transformation, ranking-based transformation, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654381}, size = {4 pages}, abstract = {Invariance properties are important for evolutionary algorithms to ensure consistent behavior and performance on broader classes of objective functions. Owing to the ranking-based transformation, several evolutionary algorithms for single-objective optimization possess the invariance property to monotonically increasing transformation of objective functions, which reduces parameter tuning costs. In contrast, in multi-objective evolutionary algorithms (MOEAs), monotonically increasing transformations of objective functions often change the behavior of existing methods and degrade their performance because they can change the shape of the Pareto front to intractable forms, such as concave and discontinuous shapes. This study proposes the ranking-based transformation (RT) framework that gives the invariance property to monotonically increasing transformations to existing MOEAs. After the MOEA generates solutions following its original procedure, RT framework computes the utility values of the solutions for each objective function. These utility values are given by a non-increasing transformation of their ranking on the unbounded archive of non-dominated solutions. RT framework then updates the population so that the MOEA acquires the Pareto optimal solutions on the utility functions.
The experimental results of the bi-objective benchmark problems show that RT framework can resolve the performance deterioration of MOEA/D-DE and NSGA-II under monotonically increasing transformations.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zhao:2024:GECCOcomp3, author = {Wanqiu Zhao and Qi Qiu and Hong Zhao and Xu Bian and He Yu and Jing Liu and Xuesong Mei}, title = {Multi-population for Multi-objective Genetic Algorithm with Adaptive Information Sharing Strategy for Berth Allocation and Quay Crane Assignment Problems}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {387--390}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {berth allocation, quay crane scheduling, hybrid heuristics initialization, adaptive information sharing, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654272}, size = {4 pages}, abstract = {For the freight transportation industry, the method of allocating container terminal berths and shore bridges is particularly important. Existing berth and bridge allocation methods mainly rely on manual methods, which are usually difficult to deal with large-scale schedule problems and face high cost and inefficiency problems, so there is an urgent need to design an efficient berth and bridge allocation method. Berths and quay cranes are scarce resources for container terminals and a sound scheduling plan is conducive to improving port mobility. This paper focuses on the integrated Berth and Quay Crane Assignment (BQCA) problem under continuous and dynamic conditions, to minimize the total waiting time and deviation distances from the preferred berth of ships. A Multi-Population for Multi-Objective Genetic Algorithm with Adaptive Information Sharing (MPMOGA-AIS) is proposed for solving the BQCA problem, which includes a Hybrid Heuristic Initialization (HHI) strategy and an Adaptive Information Sharing (AIS) strategy utilizing local and global information. 
The proposed MPMOGA-AIS is tested on the latest dataset from Huawei, and the experimental results are compared with the port result and NSGAII, which shows that our MPMOGA-AIS algorithm provides better feasible solutions for BQCA problems.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zhou:2024:GECCOcomp2, author = {Jinlong Zhou and Yinggui Zhang and Fan Yu}, title = {A Competitive Swarm Algorithm Based on {Tri-Competitive} Criterion for Constrained Multiobjective Optimization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {391--394}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {competitive swarm optimizer, tri-competitive criterion, constrained multiobjective optimization, evolutionary optimization, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654143}, size = {4 pages}, abstract = {Constrained multiobjective optimization stands as a focal point in the evolutionary computation community. Some promising algorithms for constrained multiobjective evolutionary optimization have been proposed. However, as the number of decision variables increases, canonical algorithms encounter difficulties. Competitive swarm optimizer (CSO) has demonstrated highly promising performance. In classical CSO, only half of the particles are updated in each iteration. This may lead to an inefficient search of the optimizer under limited computational resources. To address the issue, this study proposes a competitive swarm algorithm based on a tri-competitive criterion, aiming to enhance the search efficiency of the algorithm in high-dimensional decision variable scenarios. Specifically, two-thirds of the particle swarm is updated in each iteration by introducing the tri-competitive criterion. Moreover, to deal with various constraints, we use an epsilon method that integrates principles from multiobjective optimization. The computational experiments were conducted on two benchmark test suites with up to 100 decision variables. 
The experimental results indicate that the proposed algorithm has demonstrated highly competitive performance compared to peer algorithms.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zhu:2024:GECCOcomp, author = {Xinyuan Zhu and Yuji Sato and Mikiko Sato}, title = {The Optimal Zone Breadth for Parallel and Distributed {MOEA/D} with Virtual Overlap Zone and Exclusively Evaluated Mating}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {395--398}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {MOEA/D, parallel and distributed processing, many-core environment, multi-objective evolutionary algorithms, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654414}, size = {4 pages}, abstract = {In this paper, we propose an index of the optimal overlapping zone breadth for parallel and distributed MOEA/D with a virtual overlapping zone and exclusively evaluated mating. The proposed index aims to address the sparse areas that occur when the T-neighborhood is divided for parallelization computation. Increasing the overlapping zone breadth reduces the sparse areas on the Pareto solutions near the zone boundary and enhances the Hypervolume (HV) value but increases the computation time. To determine the optimal index of optimal overlapping zone breadth, we investigated the HV value and processing time when the number of censored generations was fixed and the T neighborhood size and overlapping zone breadth were changed. We also conducted an experiment to examine the HV value per unit time by varying the degree of parallelism and the breadth of overlap as parameters. Experiments on a constrained knapsack problem show that setting the overlapping zone breadth close to half of the T-neighborhood size not only reduces sparse areas of the Pareto-optimal front but also enhances the HV value.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zou:2024:GECCOcomp, author = {Juan Zou and Tianbin Xie and Qi Deng and Xiaozhong Yu and Shengxiang Yang and Jinhua Zheng}, title = {Differential Evolution based on Local Grid Search for Multimodal Multiobjective Optimization with Local Pareto Fronts}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dimo Brockhoff and Tapabrata Ray}, pages = {399--402}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {multimodal multiobjective optimization, local pareto fronts, differential evolution, local grid search, Evolutionary Multiobjective Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654235}, size = {4 pages}, abstract = {Multimodal multiobjective optimization problems (MMOPs) are characterized by multiple Pareto optimal solutions corresponding to the same objective vector. 
MMOPs with local Pareto fronts (MMOPLs) are common in the real world. However, existing multimodal multiobjective evolutionary algorithms (MMEAs) face significant challenges in finding both global and local Pareto sets (PSs) when dealing with MMOPLs. For this purpose, we propose a differential evolution algorithm based on local grid search, called LGSDE. LGSDE establishes a local grid region for each solution, achieving a balanced distribution by judging the dominant relationship only among solutions within that local region. This approach enables the population to converge towards both global and local PSs. We compare LGSDE with other state-of-the-art MMEAs. Experimental results demonstrate LGSDE exhibits superiority in addressing MMOPLs.}, notes = {GECCO-2024 EMO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(cenikj:2024:GECCOcomp, author = {Gjorgjina Cenikj and Gasper Petelin and Tome Eftimov}, title = {{TransOptAS:} {Transformer-Based} Algorithm Selection for {Single-Objective} Optimization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Katherine Malan and Youhei Akimoto}, pages = {403--406}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {algorithm selection, single-objective continuous optimization, representation learning, problem landscape features, Evolutionary Numerical Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654191}, size = {4 pages}, abstract = {Driven by the success of transformer models in representation learning across different Machine Learning fields, this study explores the development of a transformer-based Algorithm Selection (AS) model. We demonstrate the training of a transformer architecture for AS on a collection of single-objective problem instances generated through a random function generator. The transformer model predicts a numeric performance indicator for all of the algorithms in a given algorithm portfolio, relying solely on raw samples from the objective functions. The transformer model consistently outperforms a single best solver, indicating that AS can be performed without the need for intermediate feature construction. 
A comparison to the classic Exploratory Landscape Analysis (ELA) approach shows that the transformer model provides comparable results, with the ELA model commonly outperforming the transformer model by differences of around 0.02 in terms of the loss metric.}, notes = {GECCO-2024 ENUM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(li:2024:GECCOcomp2, author = {Xiaoyue Li and Timo Koetzing}, title = {Algorithm Performance Comparison for {Integer-Valued} {OneMax}}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Katherine Malan and Youhei Akimoto}, pages = {407--410}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {CMA-ES, discrete search space, RLS, Evolutionary Numerical Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654287}, size = {4 pages}, abstract = {Recently, several continuous-domain optimizers have been employed to solve mixed-integer black box optimization (MI-BBO) problems by adjusting them to handle the discrete variables as well. In this work we want to compare how these adjusted algorithms perform on purely discrete variables when compared with algorithms designed to handle discrete domains. We use the algorithm RLS_{alpha,beta} from the literature, which was analyzed theoretically on a specific problem class. We experimentally optimize the parameters alpha and beta for further analysis. Second, we make a comprehensive comparison of RLS_{alpha,beta} with algorithms from the continuous domain on a generalization of OneMax to Z^n. We find that RLS_{alpha,beta} shows better performance on this discrete benchmark function than the other algorithms. Overall, these results show that adaptations of continuous algorithms are not suited for MI-BBO problems, and that research combining the strengths of both approaches is more promising.}, notes = {GECCO-2024 ENUM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(moradi:2024:GECCOcomp, author = {Behzad Moradi and Michael Kirley and Mario Andres {Munoz Acosta}}, title = {Sensitivity Analysis of Surrogate-assisted Bilevel Optimisation}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Katherine Malan and Youhei Akimoto}, pages = {411--414}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {bilevel optimisation, evolutionary strategy, surrogate modelling, Evolutionary Numerical Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654228}, size = {4 pages}, abstract = {Bilevel optimisation problems consist of two interactive optimisation tasks, where an upper-level task must be solved subject to the optimality of a lower-level task.
Recently, a range of surrogate-assisted bilevel evolutionary algorithms have been proposed, where various approximation models have been used to substitute each of the upper- or lower-level objective functions, and in some cases, the mappings between the levels. These algorithms are highly engineered systems where alternative surrogate models with varying parameters are used, often combined with other mechanisms such as local search and/or knowledge sharing. Consequently, it is difficult to isolate the effect that the surrogate model has on performance. In this paper, we address this issue. Starting from a nested optimisation algorithm - a bilevel Covariance Matrix Adaptation Evolutionary Strategy as a baseline - we systematically introduce surrogate models at the upper-level, lower-level, and both levels simultaneously, as well as for the mapping between the levels. Using a suite of benchmark problems, we scrutinise algorithm performance. The results show the acute sensitivity of performance to the objective function or mapping being modelled within the hierarchical structure. We note that, in most test problems, smaller computation costs are evident when modelling the lower-level objective function.}, notes = {GECCO-2024 ENUM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(nguyen:2024:GECCOcomp, author = {Duc Manh Nguyen}, title = {A Combination of {CMA-ES} with Probability Distributions of Integer Variables for {Mixed-Integer} {Black-Box} Optimization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Katherine Malan and Youhei Akimoto}, pages = {415--418}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary computation, CMA-ES, mixed-integer black-box optimization, Evolutionary Numerical Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654220}, size = {4 pages}, abstract = {In this paper, we propose a method for efficiently solving the mixed-integer black-box optimization problem by utilizing the probability distribution models of integer variables in the CMA-ES algorithm. Firstly, some elite points among the generated ones during the evolution process of the CMA-ES algorithm are collected after some consecutive iterations to successively build the probability distribution model of integer variables. Then, the model is partially used to generate the values for the integer variables of candidate solutions in some next iterations.
The numerical experiments on the MI-BBO benchmark problems will show that the probability distribution models of integer variables can guide the CMA-ES better to the optimal solution of the problem, as well as demonstrate the efficiency of the proposed method.}, notes = {GECCO-2024 ENUM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(petelin:2024:GECCOcomp, author = {Gasper Petelin and Gjorgjina Cenikj}, title = {On Generalization of {ELA} Feature Groups}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Katherine Malan and Youhei Akimoto}, pages = {419--422}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {algorithm selection, exploratory landscape analysis, evolutionary computation, black-box optimization, Evolutionary Numerical Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654124}, size = {4 pages}, abstract = {Algorithm selection, i.e., selecting the most suitable algorithm for a specific problem, is a vital task in continuous black-box optimization. A popular strategy used to address this task is to characterize optimization functions using Exploratory Landscape Analysis (ELA) features, which are then utilized to train a machine learning meta-model to select the appropriate algorithm for that function. A significant challenge with meta-models trained on current benchmarks is their often restricted ability to effectively generalize to new functions, limiting their practical application. In this study, we investigate which ELA feature groups are the best at generalizing to previously unseen functions when performing algorithm selection. Using the Comparing Continuous Optimizers functions, novel functions are generated through affine recombinations of existing functions. For each ELA feature group, a meta-model is developed on these functions, enabling it to rank various optimization algorithms. Subsequently, these trained meta-models are assessed using functions that are increasingly out-of-distribution to what was observed during training. We show that most ELA feature groups do not generalize well to out-of-distribution functions, implying reduced effectiveness of selecting algorithms for unfamiliar functions. 
In such situations, meta-models using different ELA features for algorithm ranking often do not outperform basic predictions based on average ranks.}, notes = {GECCO-2024 ENUM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zhang:2024:GECCOcomp4, author = {Haotian Zhang and Jialong Shi and Jianyong Sun and Ali Wagdy Mohamed and Zongben Xu}, title = {A Gradient-based Method for Differential Evolution Parameter Control by Smoothing}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Katherine Malan and Youhei Akimoto}, pages = {423--426}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {differential evolution, parameter control, learning to optimize, Evolutionary Numerical Optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654185}, size = {4 pages}, abstract = {Differential evolution (DE) is one of the most studied algorithms in evolutionary computation. However, the parameters in DE need to be tuned carefully, which costs much computational resources. The reason is that the basic paradigm of DE (mutation, crossover, bound constraint and selection) contains non-differentiable operators. In this paper, we propose a DE paradigm called "smoDE" for the first time by smoothing the crossover operator and the bound constraint operator to make them differentiable with respect to the parameters. The experiments show that we can tune the parameters of smoDE by gradient descent with much fewer computational resources than commonly used tools such as the Bayesian optimization algorithm (BOA). Then we analyze the population diversity of smoDE theoretically and prove that smoDE can converge faster than DE. A simple experiment also validates that. We further propose the "ada-smoDE" by embedding a neural network in smoDE to output parameters of smoDE adaptively and test ada-smoDE on the CEC 2018 test suite. The results show that ada-smoDE can perform competitively on the whole test suite and significantly better than DE on some problems.}, notes = {GECCO-2024 ENUM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(garbus:2024:GECCOcomp, author = {Jack Garbus and Thomas Willkens and Alexander Lalejini and Jordan Pollack}, title = {Accelerating {Co-Evolutionary} Learning Through {Phylogeny-Informed} Interaction Estimation}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Aneta Neumann and Elizabeth Wanner}, pages = {427--430}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, phylogeny, neural networks, co-evolution, outcome estimation: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654250}, size = {4 pages}, abstract = {Co-evolution is a powerful problem-solving approach. 
However, fitness evaluation in co-evolutionary algorithms can be computationally expensive, as the quality of an individual in one population is often determined by its interactions with many (or all) members of another population. To accelerate co-evolutionary systems, we introduce phylogeny-informed interaction estimation, which uses runtime phylogenetic analysis to estimate interaction outcomes between individuals based on how their relatives performed against each other. In addition, we introduce a method for subsampling interactions which improves the accuracy of our estimates. We test our method on sorting networks using lexicase selection, and demonstrate that interaction estimation can halve the computation required to make progress in co-evolutionary search.}, notes = {GECCO-2024 GA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(he:2024:GECCOcomp, author = {Jie He and Yaohong Zhang and Ji Wang and Xiaoqing Li and Weidong Bao}, title = {{Hypernetwork-Based} {Multi-Objective} Optimization with {NSGA-II}}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Aneta Neumann and Elizabeth Wanner}, pages = {431--434}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, multi-objective optimization, NSGA-II, crowding-distance assignment, Genetic Algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654189}, size = {4 pages}, abstract = {Multi-objective optimization algorithms have a wide range of applications in various fields. NSGA-II (Non-dominated Sorting Genetic Algorithm II) is a well-known multi-objective optimization algorithm. However, when solving high-dimensional problems using NSGA-II, the algorithm complexity increases linearly with the problem dimension. To address this issue, this paper proposes the HNSGA-II (Hypernetwork Non-dominated Sorting Genetic Algorithm II) algorithm based on hypernetwork modeling. The HNSGA-II algorithm introduces a binary tree data structure and uses hypernetworks to model candidate solutions. A new crowding calculation is designed to improve the algorithm's convergence. Horizontal and vertical comparative experiments were conducted on standard test sets ZDT and BT using the newly proposed algorithm, NSGA-II, MOEA/D, and other algorithms.
The results demonstrate that the HNSGA-II algorithm achieves a maximum improvement of about 10\% in time efficiency when solving high-dimensional problems and exhibits better algorithmic convergence than the original NSGA-II algorithm.}, notes = {GECCO-2024 GA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(inan:2024:GECCOcomp, author = {Toki Tahmid Inan and Amarda Shehu}, title = {Revisiting Evolutionary Algorithms for Optimization for Deep Learning: Introducing {DL-HEA:} {EAs} for Optimization for Deep Learning}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Aneta Neumann and Elizabeth Wanner}, pages = {435--438}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {deep learning, optimization, hybrid evolutionary algorithms, Genetic Algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654341}, size = {4 pages}, abstract = {Despite the central role that optimization plays for deep learning in the model training phase, evolutionary-inspired approaches are largely under-represented in the literature. This paper proposes a Deep Learning Hybrid Evolutionary Algorithm (DL-HEA) which integrates a gradient-, mini batch-based local search operator within an evolutionary computing framework. DL-HEA shows competitive performance in optimization effectiveness and generalization capability. Findings suggest that hybrid evolutionary algorithms hold promise for addressing challenges posed by non-convex optimization for deep learning, offering a compelling alternative to Stochastic Gradient Descent in benchmarked settings and a way forward for novel optimization algorithms for deep learning.}, notes = {GECCO-2024 GA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(jain:2024:GECCOcomp, author = {Neha Jain and Andrew Hornback and May Dongmei Wang}, title = {{EMxDesign:} A Genetic Algorithm for High Affinity Drug Design}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Aneta Neumann and Elizabeth Wanner}, pages = {439--442}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, drug discovery, computer-aided drug design, cheminformatics, computational chemistry, evolutionary algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654423}, size = {4 pages}, abstract = {As pathogens are rapidly evolving to evade modern drugs, it is vital to design efficient, optimal treatments in a timely manner. One way to achieve this is by mimicking the process of evolution when designing drugs most fit to combat pathogens of interest through the use of genetic evolutionary algorithms. In this work, we introduce EMxDesign (Element Molecule Exchange Drug Design), a genetic algorithm that aims to optimize drug-pathogen affinity by inducing single element mutations in cross-overs of existing molecular drug structures. 
Using this approach, EMxDesign generates high affinity drug molecules while circumventing problems of existing genetic algorithms for drug design such as requiring specific hyperparameter tuning for different protein targets. We used EMxDesign to generate drug molecules for eighteen SARS-CoV-2 protein pocket targets, and found after just one iteration our newly generated molecules had significantly higher mean affinity for thirteen of our target pockets. We are currently refining this framework to launch an application to aid in drug development at scale.}, notes = {GECCO-2024 GA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(leither:2024:GECCOcomp, author = {Sydney Leither and Vincent Ragusa and Emily Dolson}, title = {Evolving Weighted and Directed Graphs with Constrained Properties}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Aneta Neumann and Elizabeth Wanner}, pages = {443--446}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {random graph models, complex networks, genetic algorithms, multi-objective optimization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654350}, size = {4 pages}, abstract = {Research across a range of fields sometimes requires graphs with specific properties or structures for purposes such as hypothesis testing and simulation. Random graph models are the current state-of-the-art approach; they generate distributions of graphs with certain properties. Unfortunately, random graph models can only generate specific classes of graphs. We present a genetic algorithm that evolves weighted and directed graphs with arbitrary properties specified by the user. Using NSGA-II, we evolve a population of graphs by treating each of the specified graph properties as an objective to optimize. We propose a modified version of the diversity maintenance component of NSGA-II and several graph-targeted mutation and crossover operators. Our algorithm successfully evolves graphs with multiple interacting properties while maintaining diversity in the properties not under selection. 
This tool allows for highly customizable and fine-grained generation of graphs with arbitrary properties.}, notes = {GECCO-2024 GA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(pandey:2024:GECCOcomp2, author = {Hari Mohan Pandey}, title = {Genetic Algorithm with Modified Reproduction Operators for Grammatical Inference}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Aneta Neumann and Elizabeth Wanner}, pages = {2135--2138}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {context free grammar, evolutionary computation, genetic algorithm, grammar inference, minimum description length principle, Genetic Algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654151}, size = {4 pages}, abstract = {A grammatical inference (GI) algorithm is proposed that utilizes a Genetic Algorithm (GA) in conjunction with a pushdown automaton (PDA) and the principle of minimum description length (MDL). GI is a methodology to infer context-free grammars (CFGs) from training data. It has wide applicability across many different fields, including natural language processing, language design, and software engineering. GAs are a search methodology that has been used in many domains and we utilize GAs as our primary search algorithm. The proposed algorithm incorporates a Boolean operator-based crossover and mutation operator with a random mask. Here, Boolean operators (AND, OR, NOT, and XOR) are applied as a diversification strategy. A PDA simulator is implemented to validate the production rules of a CFG. The performance is evaluated against state-of-the-art algorithms. Statistical tests demonstrate the superiority of the proposed algorithm over the algorithms implemented in this paper.}, notes = {GECCO-2024 GA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(rajwar:2024:GECCOcomp, author = {Kanchan Rajwar and Yogesh Kumar and Kusum Deep}, title = {Investigating Structural Bias in {Real-Coded} Genetic Algorithms}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Aneta Neumann and Elizabeth Wanner}, pages = {447--450}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary algorithm, real-coded genetic algorithms (RCGAS), structural bias, BIAS toolbox, generalized signature test, Genetic Algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654312}, size = {4 pages}, abstract = {Real-Coded Genetic Algorithms (RCGAs) are a significant area of focus in evolutionary algorithm research. Structural bias (SB), a recently recognized attribute in metaheuristic algorithms, leads populations to repeatedly exploit specific region(s) of the search space without acquiring new information. This behavior not only escalates the computational cost but also slows the convergence rate. 
While recent studies have revealed an inclination toward the center of the search space in basic Genetic Algorithms (GA), similar investigations for RCGA variants remain unexplored. This study addresses this gap by examining the bias in popular and widely used crossovers and mutations in RCGAs. The BIAS toolbox, a recently developed tool, is used for a preliminary assessment of biases in these variants. Furthermore, GST, a simple yet effective methodology, is utilized for a more thorough analysis, quantifying, and scrutinizing biases in RCGA variations. The outcomes of this research aim to deepen our theoretical understanding of RCGAs, thus enhancing their practical applications and contributing to the future development of more equitable and unbiased algorithms.}, notes = {GECCO-2024 GA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(roberts:2024:GECCOcomp, author = {Graham Roberts and Everett Grethel and Qian Yang}, title = {Implicit Symbolic Regression via Probabilistic Fitness}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Aneta Neumann and Elizabeth Wanner}, pages = {451--454}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, fitness evaluation, implicit equations: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654357}, size = {4 pages}, abstract = {Discovery of interpretable models from data is an important challenge, especially for learning physical relationships in scientific applications. Symbolic regression is a key method for extracting concise mathematical expressions to model these relationships. However, existing symbolic regression methods are primarily designed for explicit functions, and encounter challenges when attempting to learn implicit equations. Methods for recovering non-trivial implicit equations typically depend on numerical gradients, which require larger amounts of densely sampled training data and is not robust to noise. We develop a new, simple-to-use method for symbolic regression that enables learning of implicit equations, even with noisy training data, which to the best of our knowledge has not been widely feasible previously. Our method is also able to identify independent implicit relationships given known ones. Our approach is based on the formulation of a probabilistic fitness function using the Kullback-Leibler divergence, and is compatible with existing symbolic regression algorithms. We demonstrate our method on a series of benchmark implicit equations, as well as on a dynamical system with conserved quantities. 
The ability to recover implicit relationships from data, even in the presence of noise, has the potential to transform commonly used approaches for modeling in the physical sciences and beyond.}, notes = {GECCO-2024 GA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(yang:2024:GECCOcomp2, author = {Yudong Yang and Kai Wu and Xiangyi Teng and Handing Wang and He Yu and Jing Liu}, title = {Exploring Knowledge Transfer in Evolutionary Many-task Optimization: A Complex Network Perspective}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Aneta Neumann and Elizabeth Wanner}, pages = {455--458}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary many-task optimization, knowledge transfer, complex network, Genetic Algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654232}, size = {4 pages}, abstract = {The field of evolutionary many-task optimization (EMaTO) is increasingly recognized for its ability to streamline the resolution of optimization challenges with repetitive characteristics, thereby conserving computational resources. This paper tackles the challenge of crafting efficient knowledge transfer mechanisms within EMaTO, a task complicated by the computational demands of individual task evaluations. We introduce a novel framework that employs a complex network to comprehensively analyze the dynamics of knowledge transfer between tasks within EMaTO. By extracting and scrutinizing the knowledge transfer network from existing EMaTO algorithms, we evaluate the influence of network modifications on overall algorithmic efficacy. Our findings indicate that these networks are diverse, displaying community-structured directed graph characteristics, with their network density adapting to different task sets. This research underscores the viability of integrating complex network concepts into EMaTO to refine knowledge transfer processes, paving the way for future advancements in the domain.}, notes = {GECCO-2024 GA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zhuang:2024:GECCOcomp, author = {Feng Zhuang and Ting Huang and He Yu and Jing Liu}, title = {An Empirical Study of Constrained Evolutionary Algorithms for Isomorphic Connected Graph Segmentation Problems}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Aneta Neumann and Elizabeth Wanner}, pages = {459--462}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {constrained optimization problem, evolutionary computation, constraint-handling technique, grid graph segmentation problem, Genetic Algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654247}, size = {4 pages}, abstract = {Real-world optimization problems often involve a variety of constraints. 
Evolutionary algorithms (EAs) are commonly employed as a prevalent approach for addressing the constrained optimization problems (COPs) by incorporating constraint-handling techniques such as penalty functions. However, these methods cannot effectively solve the COPs with challenging constraints. This paper conducts a comparative study to evaluate three constrained EA variants for solving a challenging COP of anchored isomorphic connected graph segmentation, focusing on achieving connectivity and non-overlapping requirements. The first two commonly used EA variants with penalty functions fail to guide the candidate populations towards feasible solutions. To increase the candidate feasibility and search efficiency, we propose an EA with feasibility assurance strategy that incorporates an anchor diffusion initialization strategy to ensure the connectivity and non-overlapping of initial solutions. Furthermore, it employs edge erosion mutation strategy to maintain subgraph similarity. Therefore, the proposed algorithm not only rapidly identifies feasible candidates but also efficiently converges from an initial feasible solution to the global optimum. Through experimental comparisons, we demonstrate that the proposed feasibility-assured EA approach consistently outperforms the commonly used constrained EAs. Even in cases where the other methods fail to find any feasible solution, the proposed algorithm can identify the optimal solution.}, notes = {GECCO-2024 GA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(gallotta:2024:GECCOcomp, author = {Roberto Gallotta and Antonios Liapis and Georgios N. Yannakakis}, title = {Dynamic {Quality-Diversity} Search}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Alberto Moraglio and James McDermott}, pages = {463--466}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {General Evolutionary Computation, Hybrids: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654257}, size = {4 pages}, abstract = {Evolutionary search via the quality-diversity (QD) paradigm can discover highly performing solutions in different behavioural niches, showing considerable potential in complex real-world scenarios such as evolutionary robotics. Yet most QD methods only tackle static tasks that are fixed over time, which is rarely the case in the real world. Unlike noisy environments, where the fitness of an individual changes slightly at every evaluation, dynamic environments simulate tasks where external factors at unknown and irregular intervals alter the performance of the individual with a severity that is unknown a priori. Literature on optimisation in dynamic environments is extensive, yet such environments have not been explored in the context of QD search. This paper introduces a novel and generalisable Dynamic QD methodology that aims to keep the archive of past solutions updated in the case of environment changes. 
Our Dynamic QD intervention is applied on MAP-Elites and CMA-ME, two powerful QD algorithms, and we test their performance on a dynamic variant of the well-known lunar lander environment.}, notes = {GECCO-2024 GECH A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(gaylinn:2024:GECCOcomp, author = {Nathan Gaylinn and Joshua Bongard}, title = {A {Meta-Evolutionary} Algorithm for Co-evolving Genotypes and Genotype / Phenotype Maps}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Alberto Moraglio and James McDermott}, pages = {467--470}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, evolutionary computation, evolution strategies, gp map, generative encoding, cellular automata, game of life, evolvability, diversity, exploration, CPPNs, development, General Evolutionary Computation, Hybrids: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654259}, size = {4 pages}, abstract = {Evolutionary computation (EC) is often used to automatically discover solutions to optimization problems. It is valued because it allows the programmer to intuitively design a search space to fit a task, and because it is a relatively open-ended search process that favors diversity and unanticipated solutions that might be missed with gradient-based methods. Traditionally, the programmer decides on a fixed search strategy a priori, often by designing a specialized mapping from genotype to phenotype (GP map). Unfortunately, this can introduce bias and undermine the open-endedness of EC. Evolved GP maps can mitigate these concerns by automatically discovering efficient search spaces that improve evolvability. However, most research into evolved GP maps emphasizes convergence rate to a fit solution, or rate of recovery after a change in conditions. Here, we frame EC as a search over search strategies rather than a search for fit solutions. We demonstrate that a single meta-evolutionary algorithm with an evolved generative GP map can find better solutions to multiple fitness functions in the domain of 2D cellular automata than a traditional evolutionary algorithm. 
In the future, we hope these results will further the understanding of evolvability, its relationship to diversity, and the exploratory power of evolved GP maps.}, notes = {GECCO-2024 GECH A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(komosinski:2024:GECCOcomp, author = {Maciej Komosinski and Marcin Leszczynski and Konrad Miazga and Dawid Siera}, title = {Deficiencies of Best-chromosome-wins Dominance in Evolutionary Optimization of Stationary Functions}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Alberto Moraglio and James McDermott}, pages = {471--474}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary algorithms, diploidy, polyploidy, dominance, General Evolutionary Computation, Hybrids: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654361}, size = {4 pages}, abstract = {In evolutionary computation, diploid genotypes (i.e., genotypes with two chromosomes) are traditionally used mostly in the context of optimization of non-stationary problems. Recent research, however, suggested that the use of diploid genotypes with mechanisms such as best-chromosome-wins can improve the performance of evolutionary algorithms even for stationary problems. In this paper we test the effectiveness of diploidy and polyploidy (i.e., genotypes with more than two chromosomes) with best-chromosome-wins on mathematical benchmarks. We verify the effect and the importance of the crossover operator on the behavior of evolutionary algorithms with diploidy and polyploidy. We explore the inner workings of evolutionary algorithms with diploidy and polyploidy in order to better understand their performance. We find that the results reported in previous papers on the best-chromosome-wins dominance for stationary functions may have been overly optimistic, and the use of diploidy with best-chromosome-wins does not enhance the search process for such functions.}, notes = {GECCO-2024 GECH A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(nitschke:2024:GECCOcomp2, author = {Geoff Nitschke and Bilal Aslan}, title = {{Morpho-Material} Evolution for Automated Robot Design}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Alberto Moraglio and James McDermott}, pages = {475--478}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary robotics, multi-level evolution, General Evolutionary Computation, Hybrids: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654413}, size = {4 pages}, abstract = {Multi-Level Evolution (MLE) has been demonstrated for effective robot designs using a bottom-up approach, first evolving which materials to use for modular components and then how these components are connected into a functional robot design. 
This paper evaluates hierarchical MLE robotic design, as an evolutionary design method on various task (robot ambulation) environments in comparison to human designed robots (pre-designed robot controller-morphology couplings). Results indicate that the MLE method evolves robots that are effective across increasingly difficult (locomotion) task environments, out-performing pre-designed robots, and thus provide further support for the efficacy of MLE as an evolutionary robotic design method. Furthermore, results indicate the MLE method enables the evolution of suitable robotic designs for various environments, where such designs would be non-intuitive and unlikely in conventional robotic design.}, notes = {GECCO-2024 GECH A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(tian:2024:GECCOcomp, author = {Erjie Tian and Xin Zhang and Pengjiang Qian and Wei Fang and Jian Yao}, title = {Variable {Contribution-Based} Differential Evolution for {Large-Scale} Global Optimization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Alberto Moraglio and James McDermott}, pages = {479--482}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {differential evolution (DE), evolutionary computation, large-scale optimization, resource allocation, General Evolutionary Computation, Hybrids: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654219}, size = {4 pages}, abstract = {Large-scale optimization problems bring significant challenges for existing evolutionary algorithms, as their search capability cannot efficiently balance the contribution of numerous dimensions. Differential evolution (DE), due to its simplicity and efficiency, has been successfully applied to large-scale optimization problems. However, in many conventional DEs, all variables are treated equally, regardless of their contribution to the overall function value. Therefore, this paper proposes a variable contribution-Based DE to explore the search space efficiently by variables with higher contribution. Specifically, a novel estimation method is introduced to quantify the accumulated contribution of variables by evaluating the fitness improvement achieved in each successful evolution. The contribution information is then utilized to reallocate computing resources among variables. In this way, more computing resources will be allocated to variables with higher contribution, which can significantly speed up the convergence. Comprehensive experiments are conducted on the large-scale optimization benchmarks CEC 2013 and four state-of-the-art evolutionary algorithms designed for large-scale optimization. 
The results indicate that the proposed algorithm achieves competitive results compared with the state-of-the-art algorithms.}, notes = {GECCO-2024 GECH A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(wang:2024:GECCOcomp4, author = {Yi Wang and Wenshan Li and Tao Li}, title = {Adaptive Immune Optimization Algorithm: Combining Mutual Information and Differential Evolution-based Mutation for Local Feature Selection on Microarray Data}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Alberto Moraglio and James McDermott}, pages = {483--486}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {local feature selection, immune algorithm, evolutionary computation, gene feature selection, high-dimensional feature selection, General Evolutionary Computation, Hybrids: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654412}, size = {4 pages}, abstract = {Microarray data, characterized by high dimensionality and small sample sizes, poses significant challenges in identifying genes relevant for disease classification and prognosis. Our study proposes a one-stage filter local feature selection based on an immune algorithm to efficiently select relevant features in microarray data. It embeds two filter-based feature selection methods into an improved discrete immune algorithm and allocates feature subsets for different regions with considering local sample behaviors. The experimental results demonstrate the superiority of our method compared with well-known global and local feature selection methods on five microarray datasets.}, notes = {GECCO-2024 GECH A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(boldi:2024:GECCOcomp3, author = {Ryan Boldi and Ashley Bao and Martin Briesch and Thomas Helmuth and Dominik Sobania and Lee Spector and Alexander Lalejini}, title = {A Comprehensive Analysis of Down-sampling for Genetic Programming-based Program Synthesis}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ting Hu and Aniko Ekart}, pages = {487--490}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, genetic algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654134}, size = {4 pages}, abstract = {Genetic programming systems typically require large computational resource investments for training-set evaluations. Down-sampling these sets has proven to decrease costs and improve problem-solving success, particularly with the lexicase parent selection algorithm. We investigated its effectiveness when applied to three other common selection methods and across various program synthesis problems. Our findings show that down-sampling notably enhances all three methods, indicating its potential broad applicability. 
Additionally, we found informed down-sampling to be more successful than its random counterpart, particularly in selection schemes maintaining diversity like lexicase selection. We conclude that down-sampling is a promising strategy for test-based genetic programming problems, irrespective of selection scheme. This paper is a comprehensive extension of a previous poster paper [1].}, notes = {GECCO-2024 GP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(chitty:2024:GECCOcomp, author = {Darren M. Chitty and Ed Keedwell}, title = {Greedy Strategies to Improve Phased Genetic Programming When Applied Directly to the Traveling Salesman Problem}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ting Hu and Aniko Ekart}, pages = {491--494}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, optimisation, greedy methods, genetic algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654358}, size = {4 pages}, abstract = {Genetic Programming (GP) can be applied directly to combinatorial optimisation problems such as the Traveling Salesman Problem (TSP) using a phased approach. Similar to hyper-heuristics, Phased-GP evolves a program of simple operators to apply to a solution to improve it whilst operating in phases to facilitate hill-climbing. However, as optimality is approached, evolving a program of multiple operations that are not detrimental to solution quality is unlikely. However, it can be hypothesized that if Phased-GP operates in a greedy manner, the probability of improving a near optimal solution is much greater. Two greedy Phased-GP strategies are proposed. First, using greedy GP operators which can only improve current solution quality. Second, a greedy program strategy whereby only the aspect of a GP program that provides best solution quality is retained. Combining both strategies reduced relative errors by up to a further 6\%, obtaining solutions within 7\% of optimal when applied to TSPs of several thousand cities.}, notes = {GECCO-2024 GP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(cornejo-maceda:2024:GECCOcomp, author = {Guy Y. {Cornejo Maceda} and Zhutao Jiang and Francois Lusseyran and Bernd R. Noack}, title = {Fast {Self-Learning} of Turbulence Feedback Laws Using {Gradient-Enriched} Machine Learning Control}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ting Hu and Aniko Ekart}, pages = {495--498}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, hybrid method, feedback control, downhill simplex, genetic algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654395}, size = {4 pages}, abstract = {We apply genetic programming (GP) to solve the most complex turbulence control problems. 
We simultaneously optimize up to dozens of parameters and dozens of control laws, listening to dozens of sensor signals with 100 to 1000 short test runs. Unlike reinforcement learning, our implementation of GP does not require any meta parameter tuning and is many orders of magnitude faster than vanilla versions thanks to numerous enablers: 1. Parameter optimization over many dozens of experiments and simulations. 2. Smart formulation of control laws by preprocessing inputs and outputs. 3. Gradient-enriched simplex optimization of promising subspace. This work focuses on the resulting algorithm, referred to as gradient-enriched Machine Learning Control (gMLC). The applications comprise learning a multiple-input multiple-output control law to stabilize the flow past a cluster of three rotating cylinders in numerical simulations and the stabilization of the shear layer in an open cavity flow experiment. The learning acceleration achieved by gMLC opens the path to complex and time-limited experiments, including evaluation in varying operating conditions and optimization of distributed-input distributed-output control laws, i.e., functions with O(100) inputs and outputs.}, notes = {GECCO-2024 GP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(de-la-torre:2024:GECCOcomp, author = {Camilo {De La Torre} and Kevin Cortacero and Sylvain Cussat-Blanc and Dennis Wilson}, title = {Multimodal Adaptive Graph Evolution}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ting Hu and Aniko Ekart}, pages = {499--502}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, cartesian genetic programming, evolutionary computation, symbolic regression, genetic algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654347}, size = {4 pages}, abstract = {The problem of program synthesis involves automatically finding a function based on evaluation criteria, like matching input-output pairs. While Cartesian Genetic Programming (CGP) has excelled in various function synthesis tasks, it has primarily been limited to single data types, hindering its applicability to diverse data. Mixed-Type CGP, proposed in 2012, aimed to address this limitation but faced challenges due to search space limitations and complexity in building function libraries. In this study, we introduce Multimodal Adaptive Graph Evolution (MAGE), a generalized CGP extension that integrates functions of different data types by grouping them accordingly and imposing mutation constraints based on type. 
Through comparisons with standard CGP and Mixed-Type CGP on Program Synthesis Benchmark and image classification tasks, we demonstrate that MAGE's representation and mutation constraints facilitate the search for multimodal functions.}, notes = {GECCO-2024 GP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(jia:2024:GECCOcomp2, author = {Kaixuan Jia and Jianbin Ma and Xiaoying Gao and Jiaxin Niu}, title = {Archive-based multiple feature construction methods using adaptive Genetic Programming}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ting Hu and Aniko Ekart}, pages = {503--506}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, feature construction, adaptive, multiple feature, classification, genetic algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654239}, size = {4 pages}, abstract = {The quality of features is an important factor that affects the classification performance of machine learning algorithms. Feature construction based on Genetic Programming (GP) can automatically create more discriminative features, sometimes greatly improving classification performance. However, insufficient information caused by constructing a single feature or constructing only a few features can affect the classification performance of feature construction. In addition, GP may fall into premature convergence, which also affects classification performance. This paper proposes an archive-based multiple feature construction method which uses elite archive strategy to preserve and select effective constructed features, and employs an adaptive strategy for GP to adjust the crossover and mutation probabilities based on fitness values. Experiments on ten datasets show that our proposed archive-based multiple feature construction method without using adaptive GP can significantly improve the classification performance compared with traditional single feature construction method, and the classification performance can be maintained or further improved by adding the adaptive strategy. 
The comparisons with three state-of-the-art techniques show that our proposed methods can significantly achieve better classification performance.}, notes = {GECCO-2024 GP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(kowalczykiewicz:2024:GECCOcomp, author = {Michal Kowalczykiewicz and Piotr Lipinski}, title = {Introducing vectorized cluster crossover in Grammatical Evolution}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ting Hu and Aniko Ekart}, pages = {507--510}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, grammatical evolution, source code vector embeddings, latent vector representations, code2vec, genetic algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654359}, size = {4 pages}, abstract = {This paper concerns machine learning approaches for software engineering, in particular for discovering hidden knowledge from data by learning latent vector representations of source code, and incorporating it to evolutionary algorithms evolving computer programs by improving their crossover operators. It focuses on Grammatical Evolution, as an example of an evolutionary algorithm aiming at constructing a computer program, and code2vec, as a technique of discovering an efficient latent vector code representation. We propose a code2vec-based crossover operator that outperformed the regular one in computational experiments with a number of benchmark computer programs.}, notes = {GECCO-2024 GP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(lalejini:2024:GECCOcomp, author = {Alexander Lalejini and Marcos Sanson and Jack Garbus and Matthew Andres Moreno and Emily Dolson}, title = {Runtime phylogenetic analysis enables extreme subsampling for test-based problems}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ting Hu and Aniko Ekart}, pages = {511--514}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, subsampling, selection schemes, lexicase selection, phylogenetic analysis, test-based problems, genetic algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654208}, size = {4 pages}, abstract = {A phylogeny describes a population's evolutionary history. Evolutionary search algorithms can perfectly track the ancestry of candidate solutions, illuminating a population's trajectory through the search space. We introduce phylogeny-informed subsampling, a new class of subsampling methods that exploit runtime phylogenetic analyses for solving test-based problems. Specifically, we assess two phylogeny-informed subsampling methods---individualized random subsampling and ancestor-based subsampling---on ten genetic programming (GP) problems from program synthesis benchmark suites. 
Overall, we find that phylogeny-informed subsampling methods enable problem-solving success at extreme subsampling levels where other subsampling methods fail. For example, phylogeny-informed subsampling methods more reliably solved program synthesis problems when evaluating just one training case per-individual, per-generation. However, at moderate subsampling levels, phylogeny-informed subsampling generally performed no better than random subsampling on GP problems. Continued refinements of phylogeny-informed subsampling techniques offer a promising new direction for scaling up evolutionary systems to handle problems with many expensive-to-evaluate fitness criteria.}, notes = {GECCO-2024 GP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(lavinas:2024:GECCOcomp2, author = {Yuri Lavinas and Nathaniel Haut and William Punch and Wolfgang Banzhaf and Sylvain Cussat-Blanc}, title = {Dynamically Sampling biomedical Images For Genetic Programming}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ting Hu and Aniko Ekart}, pages = {515--518}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, cartesian genetic programming, image processing, active learning, data sampling, genetic algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654202}, size = {4 pages}, abstract = {In this contribution we study how to effectively evolve programs tailored for biomedical image segmentation by using an Active Learning approach in Cartesian Genetic Programming (CGP). Active Learning allows us to dynamically select training data by identifying the most informative next image to add to the training set. We study how different metrics for selecting images under active learning impact the searchability of CGP. Our results show that datasets built during evolution with active learning improve the performance of Cartesian GP substantially. In addition, we found that the choice of the particular metric used for selecting which images to add heavily impacts convergence speed. 
Our work shows that the right choice of the image selection metric positively impacts the effectiveness of the evolutionary algorithm.}, notes = {GECCO-2024 GP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(li:2024:GECCOcomp3, author = {Qiaoman Li and Xiaoying Gao and Wenyang Meng and Jianbin Ma}, title = {Multi-objective multi-population Genetic Programming for feature selection and classification to high-dimensional data}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ting Hu and Aniko Ekart}, pages = {519--522}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, high-dimensional data, multi-objective, multi-population, genetic algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654234}, size = {4 pages}, abstract = {Classification for high-dimensional data is a challenging task due to a great number of redundant and irrelevant features. Genetic Programming (GP) has its built-in feature selection characteristics and is suitable for processing high-dimensional data. However, if the number of features in GP individual is not restricted, the bloat phenomenon will occur, which affects the generalization of the training model. Moreover, GP is easy to fall into local search in the evolutionary process. In this paper, a multi-objective multi-population GP classifier construction method is proposed, which uses a multipopulation co-evolution strategy to divide the evolving population into the main and auxiliary populations with different evolutionary strategies and different evaluation criteria, and employs a multiobjective strategy to ensure classification performance and restrict the number of features at the same time. Experiments on seven high-dimensional datasets show that our proposed multi-population co-evolution strategy and multi-objective strategy are all effective to improve the classification performance of GP classifiers. 
Comparisons with three state-of-the-art GP classifier construction methods show that our proposed methods achieve better or comparable classification performance on most cases.}, notes = {GECCO-2024 GP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(murphy:2024:GECCOcomp, author = {Jared Murphy and Devroop Kar and Joshua Karns and Travis Desell}, title = {{EXA-GP:} Unifying {Graph-Based} Genetic Programming and Neuroevolution for Explainable Time Series Forecasting}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ting Hu and Aniko Ekart}, pages = {523--526}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, time series forecasting, graph-based genetic programming, neuroevolution, recurrent neural networks, genetic algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654349}, size = {4 pages}, abstract = {This work introduces Evolutionary eXploration of Augmenting Genetic Programs (EXA-GP), which converts the EXAMM neuroevolution algorithm into a graph-based genetic programming (GGP) algorithm by swapping out its library of neurons and recurrent memory cells for genetic programming (GP) operations. This enables EXA-GP to utilize the full suite of optimizations provided by EXAMM, such as distributed and multi-threaded execution, island based populations with re-population events, as well as Lamarckian weight inheritance and backpropagation for optimization of constant values. Results show that EXA-GP's evolved genetic programs achieve the same levels of accuracy as recurrent neural networks (RNNs) evolved by EXAMM, while at the same time being more explainable. EXA-GP's genetic programs are also computationally less complex than EXAMM's RNNs, suggesting that GP operations may actually be more effective than gated recurrent memory cells for time series forecasting. We also show that EXAMM and EXA-GP outperform state-of-the-art GP and recurrent CGPANN algorithms which struggle on these benchmarks.}, notes = {GECCO-2024 GP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(rabuge:2024:GECCOcomp, author = {Miguel Rabuge and Nuno Lourenco}, title = {A Comparison of Feature Engineering Techniques for Hearing Loss}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ting Hu and Aniko Ekart}, pages = {527--530}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, grammatical evolution, feature engineering, audiology, genetic algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654212}, size = {4 pages}, abstract = {Hearing Loss (HL) is becoming a concerning problem in modern society. The World Health Organization (WHO) stated in April 2021 that 5\% of the world's population suffers from HL. 
As more medical data is being gathered, practitioners are trying to help solve medical field-related problems in an automated manner. Therefore, a method to preemptively predict HL is of utter importance. Given the current trend where information of varied types is being collected from multiple sources, a need to select and combine these pieces of information arises when aiming to maximize the prediction of HL. Feature Engineering (FE) is a time-consuming and error-prone task as it is usually performed by human experts. In this paper, we aim to automatically boost Machine Learning (ML) models' performance by enhancing original features through evolutionary feature selection and construction. Concretely, we propose FEDORA, an evolutionary Automated Machine Learning (AutoML) framework for FE. The proposed framework will be compared and analysed against state-of-the-art FE techniques.}, notes = {GECCO-2024 GP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(reyes-fernandez-de-bulnes:2024:GECCOcomp, author = {Darian {Reyes Fernandez De Bulnes} and Allan {De Lima} and Aidan Murphy and Douglas {Mota Dias} and Conor Ryan}, title = {Feature Encapsulation by Stages Using Grammatical Evolution}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ting Hu and Aniko Ekart}, pages = {531--534}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, grammatical evolution, feature encapsulation, multi-target, multioutput, genetic algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654097}, size = {4 pages}, abstract = {This paper introduces a novel mechanism, Feature Encapsulation by Stages (FES), to encapsulate and transfer features as knowledge in a staged manner within the evolutionary process. Encapsulation happens via input space expansion in one or more stages by adding the best-of-run individual as an additional input. This input space expansion is managed by augmenting the grammar. We study the feasibility of dynamically modifying the grammar and reinitialising the population to make way for new individuals which quickly evolve to a better fitness level. Five different approaches to stage management are examined. In addition, three different selection processes, namely, Tournament, Lexicase and Lexi2, are used to investigate which is best suited to use with our encapsulation procedure. We benchmark our procedure on two problem domains, Boolean and Classification, and demonstrate these staging strategies lead to significantly better results. 
Statistical tests show our FES outperforms the standard baseline in all Boolean problems, with a 4-stage version performing best, obtaining significant differences in all Boolean problems.}, notes = {GECCO-2024 GP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(stanovov:2024:GECCOcomp, author = {Vladimir Stanovov}, title = {Designing {Black-Box} Optimizers with {PushGP}}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ting Hu and Aniko Ekart}, pages = {535--538}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, hyper-heuristic, numerical optimization, genetic algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654172}, size = {4 pages}, abstract = {This study is focused on an attempt to design general-purpose black-box population-based numerical optimization algorithms from scratch using the Push language and a genetic programming approach. The designed Push programs are capable of performing operations on vectors, thus allowing creating algorithms suitable for problems of any dimension. The programs are applied to each of the individuals of the population, and the interaction between individuals is possible. The training is performed on the single-objective benchmark on numerical optimization presented within the Congress on Evolutionary Computation 2017. It is shown that with significant computational effort such approach is capable of creating unique optimization methods, which could be competitive to some of the known optimization tools.}, notes = {GECCO-2024 GP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(vella-zarb:2024:GECCOcomp, author = {David {Vella Zarb} and Geoff Parks and Timoleon Kipouros}, title = {Synergistic Utilization of {LLMs} for Program Synthesis}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ting Hu and Aniko Ekart}, pages = {539--542}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, program synthesis, large language models, evolutionary algorithms, genetic algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654426}, size = {4 pages}, abstract = {Advances in Large Language Models (LLMs) have led them to be used as black boxes in several evolutionary algorithms for program synthesis. While these methods tend to be agnostic about which model is used, they only allow for using one. This paper suggests that using a combination of LLMs to seed population-based algorithms introduces more variation and leads to a wider variety of problems that can be solved, due to leveraging the strengths of component LLMs. We test this on the PSB2 suite, using the Search, Execute, Instruct, Debug and Rank (SEIDR) algorithm. 
In all cases examined, we find that using a combination of LLMs leads to more problems solved and better test pass rates compared to using the best individual model. We also find that the computational cost, as measured in terms of excess programs generated, is lowered.}, notes = {GECCO-2024 GP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(wang:2024:GECCOcomp5, author = {Chunyu Wang and Qi Chen and Bing Xue and Mengjie Zhang}, title = {Multi-task Genetic Programming with Semantic based Crossover for Multi-output Regression}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ting Hu and Aniko Ekart}, pages = {543--546}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, multi-output regression, evolutionary multitask optimization, genetic algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654282}, size = {4 pages}, abstract = {Multi-output regression involves predicting two or more target variables simultaneously. In contrast to its single-output counterpart, multi-output regression poses additional challenges primarily because the target variables are frequently interdependent. Achieving accurate predictions for one variable may necessitate a thorough consideration of its relationships with other variables. In this paper, multi-output regression problems are regarded as multi-task optimization problems where predicting one output variable is considered as one task. A new multi-task multi-population genetic programming method is proposed to solve the problem. The method utilizes the semantic based crossover operator to transfer positive knowledge and accelerate convergence. Additionally, it adopts an offspring reservation strategy to keep the quality of the individuals for the corresponding tasks. 
The empirical results demonstrate that our proposed method significantly enhances the training and the test performances of multi-task multi-population GP and also outperforms standard GP on five real-world multi-output regression datasets.}, notes = {GECCO-2024 GP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(wang:2024:GECCOcomp6, author = {Shaolin Wang and Yi Mei and Mengjie Zhang}, title = {A Preliminary Counterfactual Explanation Method for Genetic {Programming-Evolved} Rules: A Case Study on Uncertain Capacitated Arc Routing Problem}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ting Hu and Aniko Ekart}, pages = {547--550}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, hyper-heuristic, uncertain capacitated arc routing problem, counterfactual explanation, genetic algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654192}, size = {4 pages}, abstract = {In this study, we propose a novel method to enhance the interpretability of Genetic Programming Hyper-Heuristics (GPHH) by employing counterfactual explanations for Genetic Programming (GP) evolved rules in dynamic stochastic combinatorial optimisation problems. Focusing on GP-evolved rules, such as dispatching and routing policies, our research tackles the challenge of their complexity and improves user understanding. We illustrate our methodology using the Uncertain Capacitated Arc Routing Problem (UCARP) as a case study. The approach involves analyzing potential candidates in a scenario to understand why some are not selected. We introduce metrics to evaluate the quality of counterfactual explanations and adapt optimization methods to produce them. Additionally, we present a new attribute importance method based on these explanations. This research contributes to enhancing the transparency of GPHH in UCARP and may provide a reference point for future investigations into evolutionary computation and decision-making systems.}, notes = {GECCO-2024 GP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(wyrwinski:2024:GECCOcomp, author = {Piotr Wyrwinski and Krzysztof Krawiec}, title = {Guiding Genetic Programming with Graph Neural Networks}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ting Hu and Aniko Ekart}, pages = {551--554}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, symbolic regression, graph neural networks, genetic algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654277}, size = {4 pages}, abstract = {In evolutionary computation, it is commonly assumed that a search algorithm acquires knowledge about a problem instance by sampling solutions from the search space and evaluating them with a fitness function. 
This is necessarily inefficient because fitness reveals very little about solutions - yet they contain more information that can be potentially exploited. To address this observation in genetic programming, we propose EvoNUDGE, which uses a graph neural network to elicit additional knowledge from symbolic regression problems. The network is queried on the problem before an evolutionary run to produce a library of subprograms, which is subsequently used to seed the initial population and bias the actions of search operators. In an extensive experiment on a large number of problem instances, EvoNUDGE is shown to significantly outperform multiple baselines, including the conventional tree-based genetic programming and the purely neural variant of the method.}, notes = {GECCO-2024 GP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zhang:2024:GECCOcomp5, author = {Tuo Zhang and Ying Bi and Jing Liang and Bing Xue and Mengjie Zhang}, title = {Decomposition-based Multi-objective Genetic Programming for Feature Learning in Image Classification}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ting Hu and Aniko Ekart}, pages = {555--558}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, multi-objective optimization, feature learning, image classification, evolutionary computation, genetic algorithms: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654306}, size = {4 pages}, abstract = {Image classification is challenging due to the high dimensionality and large variations of the image data. As an effective method in image classification, feature learning can be considered a multiobjective problem of maximizing the classification accuracy and minimizing the number of learned features. Existing multi-objective genetic programming (MOGP) methods directly apply the multiobjective techniques to GP without considering the characteristics of feature learning tasks. Therefore, this paper proposes a decomposition-based MOGP approach with a global replacement strategy to feature learning in image classification. 
The proposed approach is compared with existing MOGP methods and the experimental results demonstrate the effectiveness of the proposed approach.}, notes = {GECCO-2024 GP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(alsouly:2024:GECCOcomp, author = {Hanan Alsouly and Michael Kirley and Mario Andres Munoz}, title = {Online {Per-Instance} Algorithm Selection for Constrained {Multi-Objective} Optimization Problems}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Pascal Kerschke and Marie-Eleonore Kessaci}, pages = {559--562}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {feature-based methods, algorithm selection, multi-objective optimization, constraint handling, adaptive evolutionary algorithm, Learning for Evolutionary Computation: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654111}, size = {4 pages}, abstract = {Landscape-aware algorithm selection relies on statistical landscape features that are usually extracted offline, independently of the optimization process. However, the computational costs when using this offline method can be excessive and typically the approach ignores information accumulated by the optimization algorithm. In this paper, we investigate online per-instance algorithm selection (PIAS) in black-box constrained multi-objective optimization problems (CMOPs). Two complementary aims guide our work: (1) to reduce the cost of the sampling strategy by using solutions that have already been evaluated by the optimization algorithm as sample points, and (2) to investigate whether there are actual performance gains when switching to the selected algorithm. The experimental results show that the feature extraction step can be effectively incorporated into the optimization process, reducing the computational costs of the PIAS model. Moreover, the Online-PIAS model outperforms the best single algorithm on average and is comparable to the Offline-PIAS model. 
Additionally, the choice of the starting algorithm significantly impacts the overall performance.}, notes = {GECCO-2024 L4EC A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(benjamins:2024:GECCOcomp, author = {Carolin Benjamins and Gjorgjina Cenikj and Ana Nikolikj and Aditya Mohan and Tome Eftimov and Marius Lindauer}, title = {Instance Selection for Dynamic Algorithm Configuration with Reinforcement Learning: Improving Generalization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Pascal Kerschke and Marie-Eleonore Kessaci}, pages = {563--566}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {dynamic algorithm configuration, reinforcement learning, instance selection, generalization, Learning for Evolutionary Computation: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654291}, size = {4 pages}, abstract = {Dynamic Algorithm Configuration (DAC) addresses the challenge of dynamically setting hyperparameters of an algorithm for a diverse set of instances rather than focusing solely on individual tasks. Agents trained with Deep Reinforcement Learning (RL) offer a pathway to solve such settings. However, the limited generalization performance of these agents has significantly hindered the application in DAC. Our hypothesis is that a potential bias in the training instances limits generalization capabilities. We take a step towards mitigating this by selecting a representative subset of training instances to overcome overrepresentation and then retraining the agent on this subset to improve its generalization performance. For constructing the meta-features for the subset selection, we particularly account for the dynamic nature of the RL agent by computing time series features on trajectories of actions and rewards generated by the agent's interaction with the environment. Through empirical evaluations on the Sigmoid and CMA-ES benchmarks from the standard benchmark library for DAC, called DACBench, we discuss the potentials of our selection technique compared to training on the entire instance set. Our results highlight the efficacy of instance selection in refining DAC policies for diverse instance spaces.}, notes = {GECCO-2024 L4EC A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(catalano:2024:GECCOcomp, author = {Giancarlo Antonino Pasquale Ignazio Catalano and Alexander E. 
I. Brownlee and David Cairns and John McCall and Russell Ainslie}, title = {Mining Potentially Explanatory Patterns via Partial Solutions}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Pascal Kerschke and Marie-Eleonore Kessaci}, pages = {567--570}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, explainable AI (XAI), combinatorial optimization problems, Learning for Evolutionary Computation: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654318}, size = {4 pages}, abstract = {We introduce Partial Solutions to improve the explainability of genetic algorithms for combinatorial optimization. Partial Solutions represent beneficial traits found by analyzing a population, and are presented to the user for explainability, but also provide an explicit model from which new solutions can be generated. We present an algorithm that assembles a collection of explanatory Partial Solutions chosen to strike a balance between simplicity, high fitness and atomicity, that are shown to be able to solve standard optimization benchmarks.}, notes = {GECCO-2024 L4EC A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(korosec:2024:GECCOcomp, author = {Peter Korosec and Tome Eftimov}, title = {{Per-Run} Algorithm Performance Improvement Forecasting Using Exploratory Landscape Analysis Features: A Case Study in {Single-Objective} {Black-Box} Optimization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Pascal Kerschke and Marie-Eleonore Kessaci}, pages = {571--574}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {black-box optimization, exploratory landscape analysis, single-objective optimization, forecasting performance, Learning for Evolutionary Computation: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654304}, size = {4 pages}, abstract = {This study introduces predictive models for automated algorithm performance improvement during runtime, overcoming the traditional prediction approaches that focus only on the final performance outcome. Leveraging sequential iteration data from the run, including solution locations and values (window size), such models forecast performance improvements after a predefined number of iterations (predictive window). The real-time prediction capability enables early evaluation of algorithm hyperparameters, potentially saving substantial computational time. A long short-term memory (LSTM) network, chosen for its efficacy with longitudinal data, is tested and analysed in a scenario where the model is personalized to runs from a single problem instance. 
Utilizing the CEC2014 benchmark suite with 10-dimensional problem instances and various DE configurations, our approach using exploratory landscape analysis features shows promising results by surpassing a standard baseline prediction model.}, notes = {GECCO-2024 L4EC A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(lange:2024:GECCOcomp, author = {Robert Lange and Yingtao Tian and Yujin Tang}, title = {Evolution Transformer: {In-Context} Evolutionary Optimization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Pascal Kerschke and Marie-Eleonore Kessaci}, pages = {575--578}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolution strategies, machine learning, Learning for Evolutionary Computation: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654393}, size = {4 pages}, abstract = {Evolutionary optimization algorithms are often derived from loose biological analogies and struggle to leverage information obtained during the sequential course of optimization. An alternative promising approach is to leverage data and directly discover powerful optimization principles via meta-optimization. In this work, we follow such a paradigm and introduce Evolution Transformer, a causal Transformer architecture, which can flexibly characterize a family of Evolution Strategies. Given a trajectory of evaluations and search distribution statistics, Evolution Transformer outputs a performance-improving update to the search distribution. The architecture imposes a set of suitable inductive biases, i.e. the invariance of the distribution update to the order of population members within a generation and equivariance to the order of the search dimensions. We train the model weights using Evolutionary Algorithm Distillation, a technique for supervised optimization of sequence models using teacher algorithm trajectories. The resulting model exhibits strong in-context optimization performance and shows strong generalization capabilities to otherwise challenging neuroevolution tasks. We analyze the resulting properties of the Evolution Transformer and propose a technique to fully self-referentially train the Evolution Transformer, starting from a random initialization and bootstrapping its own learning progress. 
We provide an open source implementation under https://github.com/RobertTLange/evosax.}, notes = {GECCO-2024 L4EC A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(lange:2024:GECCOcomp2, author = {Robert Lange and Yingtao Tian and Yujin Tang}, title = {Large Language Models As Evolution Strategies}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Pascal Kerschke and Marie-Eleonore Kessaci}, pages = {579--582}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolution strategies, machine learning, Learning for Evolutionary Computation: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654238}, size = {4 pages}, abstract = {Large Transformer models are capable of implementing a plethora of so-called in-context learning algorithms. These include gradient descent, classification, sequence completion, transformation, and improvement. In this work, we investigate whether large language models (LLMs), which never explicitly encountered the task of black-box optimization, are in principle capable of implementing evolutionary optimization algorithms. While previous works have solely focused on language-based task specification, we move forward and focus on the zero-shot application of LLMs to black-box optimization. We introduce a novel prompting strategy, consisting of least-to-most sorting of discretized population members and querying the LLM to propose an improvement to the mean statistic, i.e. perform a type of black-box recombination operation. Empirically, we find that our setup allows the user to obtain an LLM-based evolution strategy, which we call 'EvoLLM', that robustly outperforms baseline algorithms such as random search and Gaussian Hill Climbing on synthetic BBOB functions as well as small neuroevolution tasks. Hence, LLMs can act as 'plug-in' in-context recombination operators. We provide several comparative studies of the LLM's model size, prompt strategy, and context construction. 
Finally, we show that one can flexibly improve EvoLLM's performance by providing teacher algorithm information via instruction fine-tuning on previously collected teacher optimization trajectories.}, notes = {GECCO-2024 L4EC A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zorn:2024:GECCOcomp, author = {Maximilian Zorn and Sarah Gerner and Philipp Altmann and Thomas Gabor}, title = {Final Productive Fitness for Surrogates in Evolutionary Algorithms}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Pascal Kerschke and Marie-Eleonore Kessaci}, pages = {583--586}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary algorithms, dynamic objective, surrogate, productive fitness, Learning for Evolutionary Computation: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654433}, size = {4 pages}, abstract = {Final productive fitness is an a posteriori fitness estimate for evolutionary algorithms that takes into account the fitness of an individual's descendants. We use that metric in the context of surrogate-based evolutionary algorithms to show that computing a surrogate not for the original objective function but for the final productive fitness based on said objective function improves optimization properties of the surrogate function and might thus be a useful tool for certain dynamic optimization problems.}, notes = {GECCO-2024 L4EC A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(batra:2024:GECCOcomp, author = {Sumeet Batra and Bryon Tjanaka and Stefanos Nikolaidis and Gaurav Sukhatme}, title = {Quality Diversity for Robot Learning: Limitations and Future Directions}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Gabriela Ochoa and Antoine Cully}, pages = {587--590}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {quality diversity, cognitive maps, reinforcement learning, Neuroevolution: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654431}, size = {4 pages}, abstract = {Quality Diversity (QD) has shown great success in discovering high-performing, diverse policies for robot skill learning. While current benchmarks have led to the development of powerful QD methods, we argue that new paradigms must be developed to facilitate open-ended search and generalizability. In particular, many methods focus on learning diverse agents that each move to a different xy position in MAP-Elites-style bounded archives. Here, we show that such tasks can be accomplished with a single, goal-conditioned policy paired with a classical planner, achieving O(1) space complexity w.r.t. the number of policies and generalization to task variants. We hypothesize that this approach is successful because it extracts task-invariant structural knowledge by modeling a relational graph between adjacent cells in the archive. 
We motivate this view with emerging evidence from computational neuroscience and explore connections between QD and models of cognitive maps in human and other animal brains. We conclude with a discussion exploring the relationships between QD and cognitive maps, and propose future research directions inspired by cognitive maps towards future generalizable algorithms capable of truly open-ended search.}, notes = {GECCO-2024 NE A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(da-rold:2024:GECCOcomp, author = {Federico {Da Rold} and Leo Cazenille and Nathanael Aubert-Kato}, title = {Analysis of Pruned Deep Models Trained with Neuroevolution}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Gabriela Ochoa and Antoine Cully}, pages = {591--594}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {pruning, neural networks, neuroevolution, quality diversity algorithms, network science, information theory: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654379}, size = {4 pages}, abstract = {Deep neural networks show remarkable results in several fields of machine intelligence, such as effective classification in computer vision and robust adaptive systems in reinforcement learning scenarios. However, such models are typically overparametrized, leading to redundant neural pathways that massively increase the computational and energetic costs without providing any significant performance gain. Recent works have addressed the problem by proposing pruning techniques or using gradient-free neuroevolution to minimize model size. However, a systematic analysis of the effects of pruning in neural models during evolution is still missing. We propose an exploratory approach to fill this gap, relying on mathematical tools from network science to capture network structure regularities and information-theoretic analysis to describe the learning process. We focus on reinforcement learning problems solved by a quality diversity approach in which a pruning operator is evolved along with the models. Results from the analysis show the emergence of patterns and regimes in the mutual information and the estimation of network measures. 
This exploratory work will provide a solid ground for guiding and facilitating the development of pruning algorithms.}, notes = {GECCO-2024 NE A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(le-clei:2024:GECCOcomp, author = {Maximilien {Le Clei} and Stav Bar-Sheshet and Pierre Bellec}, title = {Generational Information Transfer with Neuroevolution on Control Tasks}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Gabriela Ochoa and Antoine Cully}, pages = {595--598}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {Neuroevolution: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654415}, size = {4 pages}, abstract = {Traditional genetic algorithms compute fitness scores every generation for all agents in a population, which typically requires agents to perform a task until they either fail or succeed. These evaluations can turn into a computational bottleneck when tasks are either time-consuming or infinite. A common workaround is to set an expiration time on agent trials, i.e. to evaluate agents on a subset of the task, which can bias the true task objective. We propose to address this bias through a novel information inheritance genetic algorithm where three distinct attributes can be passed down generations: the task environment state (agents resume from where their parents reached task expiration); the memory state (agents inherit internal representations from their parents); and the fitness (ancestors' evaluation scores are compounded with an agent's own score for selection). We benchmark the various combinations of these three inherited attributes by running a genetic neuroevolution algorithm on popular feature-based control tasks. We report that information inheritance can lead to substantial increases in both data and runtime efficiency, suggesting it may greatly benefit a variety of genetic algorithm techniques and applications in the future. We provide the relevant source code.}, notes = {GECCO-2024 NE A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(londt:2024:GECCOcomp, author = {Trevor Londt and Xiaoying Gao and Yi Mei}, title = {A Cooperative Coevolution Neural Architecture Search Approach for Evolving Convolutional Neural Networks}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Gabriela Ochoa and Antoine Cully}, pages = {599--602}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {neural architecture search, cooperative coevolution, convolutional neural networks, classification, Neuroevolution: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654102}, size = {4 pages}, abstract = {Evolutionary Neural Architecture Search (ENAS) automates convolutional neural network (CNN) construction. Cooperative Coevolution (COCE) divides an optimisation problem into sub-problems. 
Our COCE ENAS algorithm decomposes a CNN into sub-architectures, evolving each sub-architecture separately but in cooperation. Tested on Fashion-MNIST, CIFAR-10, AG's News, and Yelp Reviews Full datasets, our proposed algorithm surpasses several state-of-the-art NAS methods in performance. Our algorithm is the first COCE-based ENAS algorithm that focuses on cooperatively evolving the four CNN sub-architectures and is effective in both image and text classification tasks.}, notes = {GECCO-2024 NE A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(morgan:2024:GECCOcomp, author = {Brandon Morgan and Dean Hougen}, title = {Neural Loss Function Evolution for {Large-Scale} Image Classifier Convolutional Neural Networks}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Gabriela Ochoa and Antoine Cully}, pages = {603--606}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, neural loss function search, computer vision, deep learning, Neuroevolution: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654380}, size = {4 pages}, abstract = {For classification, neural networks typically learn by minimizing cross-entropy, but are evaluated and compared using accuracy. This disparity suggests neural loss function search (NLFS), the search for a drop-in replacement loss function of cross-entropy for neural networks. We apply NLFS to image classifier convolutional neural networks. We propose a new search space for NLFS that encourages more diverse loss functions to be explored, and a surrogate function that accurately transfers to large-scale convolutional neural networks. We search the space using regularized evolution, a mutation-only aging genetic algorithm. We transferred the final loss functions across multiple architectures, datasets, and image augmentation techniques to assess generalization. 
In the end, we discovered three new loss functions, called the NeuroLoss functions, that were able to outperform cross-entropy in terms of a higher mean test accuracy as a simple drop-in replacement loss function across the majority of experiments.}, notes = {GECCO-2024 NE A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(pietron:2024:GECCOcomp, author = {Marcin Pietron and Dominik Zurek and Kamil Faber and Roberto Corizzo and Anna Wojcik}, title = {{AD-NEv++} - The multi-architecture neuroevolution-based multivariate anomaly detection framework}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Gabriela Ochoa and Antoine Cully}, pages = {607--610}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {neuroevolution, anomaly detection, deep learning, GNN: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654360}, size = {4 pages}, abstract = {Anomaly detection tools and methods enable key analytical capabilities in modern cyberphysical and sensor-based systems. Despite the fast-paced development in deep learning architectures for anomaly detection, model optimization for a given dataset is a cumbersome and time-consuming process. Neuroevolution could be an effective and efficient solution to this problem, as a fully automated search method for learning optimal neural networks, supporting both gradient and non-gradient fine-tuning. However, existing frameworks incorporating neuroevolution lack support for new layers and architectures and are typically limited to convolutional and LSTM layers. In this paper we propose AD-NEv++, a three-stage neuroevolution-based method that synergically combines subspace evolution, model evolution, and fine-tuning. Our method overcomes the limitations of existing approaches by optimizing the mutation operator in the neuroevolution process, while supporting a wide spectrum of neural layers, including attention, dense, and graph convolutional layers. Our extensive experimental evaluation was conducted with widely adopted multivariate anomaly detection benchmark datasets, and showed that the models generated by AD-NEv++ outperform well-known deep learning architectures and neuroevolution-based approaches for anomaly detection. 
Moreover, results show that AD-NEv++ can improve and outperform the state-of-the-art GNN (Graph Neural Networks) model architecture in all anomaly detection benchmarks.}, notes = {GECCO-2024 NE A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(saifullah:2024:GECCOcomp, author = {Shoffan Saifullah and Rafal Drezewski}, title = {Improved Brain Tumor Segmentation Using Modified {U-Net} based on Particle Swarm Optimization Image Enhancement}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Gabriela Ochoa and Antoine Cully}, pages = {611--614}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {medical image enhancement, brain tumor detection, image segmentation, modified U-net, particle swarm optimization, Neuroevolution: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654339}, size = {4 pages}, abstract = {This study introduces a robust methodology, a Modified U-Net with Particle-Swarm-Optimization-based Image Enhancement, to address the complexities of brain tumor segmentation. Leveraging PSO-based Image Enhancement's adaptive features, our approach achieves superior performance on a dataset of 3064 Brain MRI images, boasting an accuracy of 99.93\%, minimal loss (0.0015), and impressive Dice (0.9699) and Jaccard index (0.9421) values for overall images. The method significantly improves segmentation accuracy, as evidenced by the increase of 9.37 p.p. in Dice and 5.3 p.p in the Jaccard index compared to the U-Net basic approach. Comparative analysis with other methods, including Modified U-Net variants, LinkNet, SegNet, Active Contour, and Fuzzy C-Means, consistently demonstrates outperformance. This method advances medical image analysis by providing precise segmentation and paves the way for future research into optimization and extensions for diverse medical imaging applications.}, notes = {GECCO-2024 NE A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(tadepalli:2024:GECCOcomp, author = {Aswitha Tadepalli and Kishalay Mitra}, title = {Satellite Image based Crop Classification Using Convolutional Autoencoder}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Gabriela Ochoa and Antoine Cully}, pages = {615--618}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {remote sensing, computer vision, convolutional auto encoder, LSTM, deep learning, Neuroevolution: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654383}, size = {4 pages}, abstract = {Crop classification using satellite images is one of the crucial techniques in crop monitoring. It is essential in planning and managing various policies and utilizing water resources required for maintaining and enhancing crop growth and yield. 
Conventionally, crop classification is performed by manually extracting the vegetation indices and spectral data from the images and using them in machine-learning models. However, these techniques are highly tedious and limited to only the region of interest. To overcome these issues, recently developed advanced deep learning techniques are of interest in the remote sensing field. Different combinations of spatial and temporal models such as VGG Net, and Google Net combined with Recurrent Neural Network (RNN), self-attention models, and Long Short-Term Memory (LSTM) are currently being used for crop classification. However, we present a novel spatial encoder, Convolutional Auto Encoder (CAE), to competently extract the spatial features of the satellite images and use them in the LSTM models for the classification task. Additionally, to improve the classification accuracy, we optimize the architecture of LSTM models using the Bayesian Optimization technique. Finally, we compare the proposed model's performance with the reported models in the literature to justify its potential for crop classification tasks.}, notes = {GECCO-2024 NE A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(valentim:2024:GECCOcomp, author = {Ines Valentim and Nuno Antunes and Nuno Lourenco}, title = {Exploring Layerwise Adversarial Robustness Through the Lens of {t-SNE}}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Gabriela Ochoa and Antoine Cully}, pages = {619--622}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {adversarial examples, latent space visualization, neuroevolution, robustness: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654258}, size = {4 pages}, abstract = {Adversarial examples, designed to trick Artificial Neural Networks (ANNs) into producing wrong outputs, highlight vulnerabilities in these models. Exploring these weaknesses is crucial for developing defenses, and so, we propose a method to assess the adversarial robustness of image-classifying ANNs. The t-distributed Stochastic Neighbor Embedding (t-SNE) technique is used for visual inspection, and a metric, which compares the clean and perturbed embeddings, helps pinpoint weak spots in the layers. Analyzing two ANNs on CIFAR-10, one designed by humans and another via NeuroEvolution, we found that differences between clean and perturbed representations emerge early on, in the feature extraction layers, affecting subsequent classification. 
The findings with our metric are supported by the visual analysis of the t-SNE maps.}, notes = {GECCO-2024 NE A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(anderer:2024:GECCOcomp, author = {Simon Anderer and Nicolas Justen and Bernd Scheuermann and Sanaz Mostaghim}, title = {Survival Strategies for Evolutionary Role Mining Algorithms Using Expert Knowledge}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {623--626}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {interactive role mining, evolutionary algorithms, expert knowledge, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654183}, size = {4 pages}, abstract = {To ensure the security of IT systems of companies and organizations, strong and reliable access control mechanisms must be implemented. A widely used approach in this context is Role Based Access Control, in which permissions are grouped into roles, that are then assigned to the users of the IT system. Such assignments are referred to as role concepts. The goal of the Role Mining Problem (RMP) is to determine the role concept with the minimum number of roles. The RMP has been shown to be NP-complete and evolutionary algorithms have been applied as suitable meta-heuristics to search for good solutions. Recently, it has been demonstrated that the integration of expert knowledge through user interaction with the running evolutionary algorithms can significantly improve and speed up the optimization process. This paper, however, explores a different approach to incorporating expert knowledge by injecting favorable roles into the individuals representing role concepts. The extent to which such injections affect the optimization progress is investigated, and various survival strategies are presented to ensure that individuals with injected roles survive long enough to exert their positive influence on the optimization process.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(antkiewicz:2024:GECCOcomp, author = {Michal Antkiewicz and Pawel B. Myszkowski and Konrad Gmyrek}, title = {Gene-level adaptation in Balanced {Non-Dominated} Tournament Genetic Algorithm {(aB-NTGA)} applied to versatile {Multi-Stage} {Weapon-Target} Assignment Problem}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {627--630}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {multi-objective optimization, weapon-target assignment, evolutionary computation, adaptation, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654342}, size = {4 pages}, abstract = {This paper considers the Multi-Stage Weapon-Target Assignment (MWTA) as a multi-objective Multi-Stage Resource Allocation and provides a generalized model. 
We identify other problems beyond the military domain that can share the model using the proposed mappings. In response to limited benchmarks and their lack of diversity, we introduce a novel differentiated suite of instances. Incorporating relations between variables results in a more realistic and challenging benchmark. We examined the benchmark using state-of-the-art multi-objective optimization methods and proposed a gene-level mutation adaptation mechanism to enhance the effectiveness of the Balanced Non-Dominated Tournament Genetic Algorithm (B-NTGA).}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(blair:2024:GECCOcomp, author = {Aidan Blair and Amirali {Khodadadian Gostar} and Xiaodong Li and Alireza Bab-Hadiashar and Reza Hoseinnezhad}, title = {Evaluation of Genetic Algorithms in {Multi-Vehicle} Control for {Multi-Target} Tracking Applications}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {631--634}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithm, multi-target tracking, vehicle control, distributed sensor network, random finite sets, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654261}, size = {4 pages}, abstract = {Tracking multiple dynamic targets using a network of sensors is a challenging yet essential task in intelligent vehicles that requires positioning the sensors in optimal locations to collect informative measurements of the system. This paper studies the application of genetic algorithms (GAs) in solving this problem. Multiple GA-based multi-sensor control algorithms are proposed, tested and compared against existing baseline methods on challenging simulated scenarios. 
We find that the proposed solutions significantly enhance tracking performance and computational tractability compared to the baseline methods, showcasing the suitability of GA-based control methods in intelligent transportation systems.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(bo:2024:GECCOcomp, author = {Zi Yang Bo and Qiang Yang and Ya Hui Jia and Xu Dong Gao and Zhen Yu Lu and Jun Zhang}, title = {Evolutionary Algorithm with {Cross-Generation} Environmental Selection for Traveling Salesman Problem}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {635--638}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {traveling salesman problem, evolutionary algorithms, cross-generation environmental selection, combinatorial optimization, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654166}, size = {4 pages}, abstract = {To enhance the evolution of classical evolutionary algorithms (EAs) in solving the traveling salesman problem (TSP), this paper devises a cross-generation environmental selection mechanism to pick promising yet diversified individuals for the next iteration. Specifically, instead of only using parental individuals or offspring individuals, such an environmental selection strategy combines the offspring in two consecutive generations and then selects the half best individuals as the parent population for EAs to evolve in the next iteration. In this way, EAs with this approach not only preserve rich diversity but also have rapid convergence, enabling the population to discover optimal solutions effectively and efficiently. Particularly, this paper embeds this environmental selection strategy into the classical genetic algorithm (GA) along with three different crossover strategies to solve TSP. Experiments have been conducted on 8 TSP instances of different scales from the TSPLIB benchmark set. 
Experimental results demonstrate that the proposed environmental selection scheme is very helpful for EAs to solve TSP.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(carlet:2024:GECCOcomp, author = {Claude Carlet and Marko Durasevic and Domagoj Jakobovic and Stjepan Picek}, title = {On the Evolution of Boolean Functions with the Algebraic Normal Form Representation}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {639--642}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary algorithms, algebraic normal form, algebraic degree, nonlinearity, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654386}, size = {4 pages}, abstract = {This work investigates the evolution of Boolean functions represented with the algebraic normal form (ANF) representation. This novel direction allows for a better "fit" between the representations and properties but also presents some challenges. Our experimental evaluation shows that ANF representation works well and can find highly nonlinear (balanced) functions. Our results indicate that the bitstring encoding with the algebraic normal form representation can perform better than the bitstring encoding with the truth table representation, which is the most common approach in the literature.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(che:2024:GECCOcomp, author = {Gelegen Che and Lixin Tang and Guodong Zhao and Qingxin Guo and Jianqiang Guo}, title = {An Enhanced {Surrogate-Assisted} {Multi-Objective} Differential Evolution Algorithm with {Self-Adaptive} Strategies for Order Planning in Hot Rolling Process: {Surrogate-Assisted} {MODE} with {Self-Adaptive} Strategies for {OPHR} Problem}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {643--646}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {order planning, hot rolling, multi-objective differential evolution, surrogate model, adaptive mutation strategy, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654244}, size = {4 pages}, abstract = {This paper studies a crucial decision-making problem of order planning for a hot rolling process (OPHR). The OPHR problem is to determine the optimal sequence of orders, with the goals of minimizing penalties for earliness/tardiness, maximizing profits from special orders, and minimizing transition penalties of adjacent orders. A multi-objective differential evolution (MODE) method with self-adaptive strategies (SaMODE) is developed to tackle it. 
Then, a radial basis function neural network is integrated into the SaMODE (RBFNN-SaMODE) as a surrogate model to reduce the computational burden of evaluating objectives throughout evolution. The proposed method is extensively tested on real production data, and the results demonstrate its superiority over the classical MODE, yielding highly effective solutions.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(chen:2024:GECCOcomp4, author = {Siping Chen and Debiao Li and Xiqin Gan and Raymond Chiong}, title = {An Optimised Light Gradient Boosting Machine Model for Setup Time Prediction in Electronic Production Lines}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {647--650}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {setup time prediction, ensemble learning, hyperparameter optimisation, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654428}, size = {4 pages}, abstract = {Setup time is pivotal in printed circuit board (PCB) assembly line operations. However, PCB production encounters varying setup times due to multiple influencing factors. This paper addresses an uncertain setup time prediction problem in PCB assembly production lines. Unlike existing production time prediction models, our proposed approach integrates a comprehensive range of production features, not only with features related to PCBs but also production line operators, setup procedures and so on. To enhance model accuracy and mitigate overfitting, we implemented some data preprocessing phases and designed a random forest-integrated feature selection method. With the selected features, we used a light gradient boosting machine (LightGBM) as the predictive model and optimised its hyperparameters by a differential evolution (DE) algorithm. We validated our model's performance through extensive computational experiments based on real-world industrial data, focusing on feature selection efficiency and hyperparameter optimisation. The experimental results confirmed that our proposed DE-LightGBM can reduce redundant features and optimise the integral hyperparameters for model training. We also compared the DE-LightGBM model to some well-established machine learning approaches in different setup scenarios. 
The proposed DE-LightGBM outperformed other machine learning methods being compared, delivering accurate setup time predictions in both standard and complex scenarios.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(chien:2024:GECCOcomp, author = {Trinh Van Chien and Bui Trong Duc and Ho Viet Duc Luong and Huynh Thi Thanh Binh and Hien Quoc Ngo and Symeon Chatzinotas}, title = {Solving Indefinite Communication Reliability Optimization for {RIS-Aided} Mobile Systems by an Improved Differential Evolution}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {651--654}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {RIS-assisted MIMO, beamforming design, communication reliability, differential evolution, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654300}, size = {4 pages}, abstract = {This paper exploits the applications of evolutionary algorithms to solve a challenging category of optimization problems in 6G mobile networks, particularly focusing on communication reliability with modulated signals of reconfigurable intelligent surface (RIS)-assisted multiple input multiple output (MIMO) systems. By deriving the analytical downlink symbol error rate (SER) of each user as a multivariate function of both the phase-shift and beam-forming vectors, we introduce a novel average SER minimization problem subject to the transmitted power budget and phase shift coefficients, which is NP-hard. By incorporating the differential evolution (DE) algorithm as a pivotal tool and an efficient local search to overcome the local optimum for optimizing the intricate active and passive beamforming variables, the non-convexity of the considered SER optimization problem can be effectively handled. Numerical results indicate that the proposed joint active and passive beamforming design is superior to the other benchmarks.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(cunegatti:2024:GECCOcomp, author = {Elia Cunegatti and Leonardo Custode and Giovanni Iacca}, title = {{Many-Objective} Evolutionary Influence Maximization: Balancing Spread, Budget, Fairness, and Time}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {655--658}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {influence maximization, multi-objective optimization, evolutionary computation, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654161}, size = {4 pages}, abstract = {The Influence Maximization (IM) problem seeks to discover the set of nodes in a graph that can spread the information propagation at most. 
This problem is known to be NP-hard, and it is usually studied by maximizing the influence (spread) and, optionally, optimizing a second objective, such as minimizing the seed set size or maximizing the influence fairness. In this work, we propose a first case study where several IM-specific objective functions, namely budget, fairness, communities, and time, are optimized on top of the maximization of influence and minimization of the seed set size. To this aim, we introduce MOEIM (Many-Objective Evolutionary Algorithm for Influence Maximization), a Multi-Objective Evolutionary Algorithm (MOEA) based on NSGA-II incorporating graph-aware operators and a smart initialization. We compare MOEIM in two experimental settings, including a total of nine graph datasets, two heuristic methods, a related MOEA, and a state-of-the-art Deep Learning approach. The experiments show that MOEIM overall outperforms the competitors in most of the tested many-objective settings. The codebase and Supplementary Material of this work are available at https://github.com/eliacunegatti/MOEIM.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(fitzgerald:2024:GECCOcomp, author = {Seth G. Fitzgerald and Sarah Baldwin and Gary W. Delaney and David Howard and Frederic Maire}, title = {Evolution of Material Properties and Gripping Strategy in Soft Robotic Jamming Devices}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {659--662}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary robotics, granular materials, soft robotics, NSGA-III, multi-objective optimization, discrete element method, computational modelling, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654384}, size = {4 pages}, abstract = {We consider the problem of design of granular materials for optimal gripping behaviour in soft robotic devices. This represents a highly challenging multi-dimensional design problem, requiring consideration of material, geometry and control strategies. Our system consists of a bag gripper composed of a flexible balloon attached to a vacuum pump, filled with granular material of non-spherical particles. At atmospheric pressure the bag is soft, flexible and deforms around a target object. When the vacuum is then applied, the granular material within the gripper densifies, freezing into a rigid shape and gripping the target object. The morphology of the particles within the granular material plays a crucial role in determining the dynamics of the gripping behaviour and the overall performance of the gripper in terms of its interaction with the target object and the strength of the grip (quantified as the pull-off force). 
In this work we consider both a real world physical gripper and a corresponding computational model (based on the Discrete Element Method), and employ NSGA-III to optimise the design of both the complex structure of the granular material within the gripper and also an optimal gripping strategy to achieve maximum gripping performance for multiple fitness criteria.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(fu:2024:GECCOcomp, author = {Yu Fu}, title = {Dynamic Evaluation Functions Based Routing and Wavelength Assignment Optimization for Optical Network Capacity Expansion}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {663--666}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {routing and wavelength assignment, iterated local search, dynamic evaluation function, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654114}, size = {4 pages}, abstract = {The rapid growth of services has resulted in a frequent expansion of optical network capacity, leading to increasing challenges with Routing and Wavelength Assignment (RWA)---the strategy for allocating resources that limits optical networks' line utilization and capacity. As an NP-hard problem, RWA significantly impacts the cost of expanding optical network capacity, the deployment cycle, and the utilization of fiber resources. This study decomposes the optical network capacity expansion problem into two parts: the RWA optimization problem for stock fiber based on link redundancy and the minimization problem for incremental fiber based on link utilization. The former is solved using the shortest path strategy, which reduces the RWA reallocation rate and service interruption rate, and the latter is solved using the tabu search strategy, which improves fiber resource utilization. Experiments have shown that this method reduces service expansion configuration delay to milliseconds and improves fiber resource utilization by about 70\% on real-case RWA instances. 
This positively affects the rapid expansion of optical networks with expanding parameter dimensions of optical devices.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(guo:2024:GECCOcomp, author = {Jinyue Guo and Anna-Maria Christodoulou and Balint Laczko and Kyrre Glette}, title = {{LVNS-RAVE:} Diversified audio generation with {RAVE} and Latent Vector Novelty Search}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {667--670}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {neural audio synthesis, variational autoencoder, latent vector evolution, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654432}, size = {4 pages}, abstract = {Evolutionary Algorithms and Generative Deep Learning have been two of the most powerful tools for sound generation tasks. However, they have limitations: Evolutionary Algorithms require complicated designs, posing challenges in control and achieving realistic sound generation. Generative Deep Learning models often copy from the dataset and lack creativity. In this paper, we propose LVNS-RAVE, a method to combine Evolutionary Algorithms and Generative Deep Learning to produce realistic and novel sounds. We use the RAVE model as the sound generator and the VGGish model as a novelty evaluator in the Latent Vector Novelty Search (LVNS) algorithm. The reported experiments show that the method can successfully generate diversified, novel audio samples under different mutation setups using different pre-trained RAVE models. The characteristics of the generation process can be easily controlled with the mutation parameters. The proposed algorithm can be a creative tool for sound artists and musicians.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(kanani:2024:GECCOcomp, author = {Somayeh Kanani and Parham Moradi and Mahdi Jalili}, title = {Decomposition and {Clustering-Based} {Many-Objective} Optimization for {Multi-Label} Feature Selection}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {671--674}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {multi-label feature selection, many-objective optimization, decomposition, density-based clustering, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654279}, size = {4 pages}, abstract = {In this paper a novel many-objective optimization algorithm called MaEA/DC is proposed. The proposed method utilizes the random objective division (ROD) strategy to decompose solutions across various objectives as a pivotal step, thereby enhancing search capabilities through simultaneous application of multiple decomposition methods. 
Moreover, to address the abundance of non-dominated solutions in many-objective problems, a density-based clustering method is employed to cluster the solutions along the Pareto front. The cluster centers are then selected, while the others are eliminated to reduce the density of the Pareto front. This approach ensures the improvement of population convergence while maintaining its diversity. Experimental validation demonstrates that our method effectively balances multiple objectives, eliminates irrelevant and redundant features, and achieves satisfactory classification results in comparison to other methods.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(kim:2024:GECCOcomp2, author = {Tae-Hoon Kim and Seung-Hyun Moon and Yong-Hyuk Kim}, title = {Evolutionary Ensemble for Predicting Drifter Trajectories Based on Genetic Feature Selection}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {675--678}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, machine learning, feature selection, ensemble methods, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654419}, size = {4 pages}, abstract = {In the event of a maritime accident, accurately predicting the trajectories of objects is crucial for devising appropriate measures to rescue individuals in distress and prevent the spread of oil spills. To achieve this, we created four machine learning models using data obtained from the drifter and particle trajectory modeling framework. However, the data had only six input factors, which were insufficient for accurate trajectory prediction. Therefore, we conducted feature expansion using time series characteristics of the data and created 24 additional features to use as input factors, resulting in an average performance improvement of 74\% compared to using only the basic six features. In addition, we performed feature selection using a wrapper method based on genetic search to remove unnecessary features, improving computing time by an average of 28\% and performance by an average of 0.1\%. Subsequently, we built an ensemble of the four machine learning models. We used weighted voting as the ensemble technique and optimized the weights using a genetic algorithm.
The results showed better performance than the ensemble model with uniform weights by an average of 1.9\% and the four base models by an average of 5.4\%.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(lee:2024:GECCOcomp, author = {So-Jung Lee and Yong-Hyuk Kim}, title = {A Memetic Algorithm to Identify Search Patterns for Maximal Coverage of Drifting Oceanic Particles}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {679--682}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithm, local search, memetic algorithm, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654418}, size = {4 pages}, abstract = {The analysis of maritime distress incidents, which have increased due to the growing volume of international cargo transport, reveals that a significant number of distressed individuals go missing or perish due to prolonged search times. Consequently, this paper places emphasis on identifying an optimal route to locate as many distressed individuals as possible. The experiment was conducted by extracting paths based on the predicted movement of particles influenced by various factors. Our objective is to derive a path that encompasses a greater number of particles compared to simple paths. To enhance performance, we opted for a memetic algorithm instead of a pure genetic algorithm. We conducted a comparative analysis using order crossover and PMX, as we sought to identify the one yielding better results. The experimental results demonstrated an improvement in the memetic algorithm, achieving a 79\% enhancement compared to the pure genetic algorithm in terms of particle coverage. For an 8\texttimes{}12 grid, it covered 45\% more particles than simple paths, and PMX yielded 1.1\% better results than order crossover on average. For a 6\texttimes{}9 grid, both techniques performed similarly, and the results showed that the number of particles covered was approximately 33\% higher than simple paths.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(llanza:2024:GECCOcomp, author = {Arcadi Llanza and Nadiya Shvai and Amir Nakib}, title = {Evolutionary Algorithm Based Adaptive Image Anonymization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {683--686}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {optimization, metaheuristics, image classification, privacy protection, neural network, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654427}, size = {4 pages}, abstract = {Data privacy laws put restrictions on the processes of image collection, storage, and usage.
Commonly, sensitive information in an image can be removed by modifying the corresponding areas of the image by blurring, masking, or replacement. However, an image anonymized in such a way may result in an unexpected outcome from a neural network performing a computer vision task. In particular, a classification prediction for an anonymized image might significantly differ from a prediction for the original image. In this work, we introduce an adaptive image anonymization method based on an evolutionary algorithm that avoids unwanted prediction discrepancies. The proposed approach makes it possible to construct anonymized areas within an image and does not require access to the architecture and weights of the neural network that performs classification. Experiments conducted on the ImageNet benchmark dataset with face anonymization convincingly prove the efficiency of the proposed method.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(luo:2024:GECCOcomp, author = {Zhongtian Luo and Jianpeng Xiong and Hu Peng and Gaosheng Zhan and Qingfu Zhang and Hui Wang and Xinyu Zhou and Wei Li and Ying Huang}, title = {A Micro Dynamic Multi-objective Evolutionary Algorithm for Small-scale Smart Greenhouse with Low-power Microprocessor}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {687--690}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {a micro dynamic multi-objective algorithm, smart greenhouse, low-power microprocessor, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654276}, size = {4 pages}, abstract = {A smart greenhouse is a modern agricultural facility that integrates smart control systems to regulate the plant growth environment through advanced intelligent technology and devices. In recent years, smart greenhouses have received widespread attention and have been applied in agriculture. Due to their high energy demands and costs, current smart greenhouses are often impractical for applications with limited resources. Nevertheless, small-scale smart greenhouses with low-power microprocessors are more suitable for homes, offices, and other fields. Therefore, this paper proposes a micro dynamic multi-objective evolutionary algorithm (muDMOEA) for small-scale smart greenhouse with low-power microprocessor, which applies chaotic mapping to select dynamic response strategies based on the fitness of dominant relationships and k-nearest neighbor environmental selection. muDMOEA performs well in the simulation of small-scale smart greenhouses. It not only outperforms SGEA, DNSGA-II, and RVCP in the IGD indicator but also plays a good role in adjusting environmental parameters.
It demonstrates the feasibility and effectiveness of micro dynamic multi-objective optimization on small-scale smart greenhouse with low-power microprocessor.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(murali:2024:GECCOcomp, author = {Ritwik Murali and Veeramanohar Avudaiappan}, title = {Exploring Evolution for Aesthetic and Abstract 3D Art}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {691--694}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {3D ART, abstract art, aesthetic measures, computational creativity, computational aesthetics, evolutionary algorithms, evolutionary art, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654321}, size = {4 pages}, abstract = {Generating 3D objects to produce art that diverges from conventional or existing artistic patterns is a major challenge. Given the growing prevalence of three-dimensional (3D) art in various domains such as architecture, game design, augmented reality, virtual reality, etc., it becomes imperative to devise strategies for generating diverse forms of 3D objects. This research explores the evolution of 3D artistic objects that are complex, aesthetically pleasing and abstract in nature. Eleven different fitness metrics adapted from existing 2D image evolution metrics are first used to generate the 3D models and subsequently 3 new metrics are proposed specifically for the 3D model generation. Then, 4 different methods are used to set the initial population of each of the 14 different metrics resulting in 56 different 3D model evolution techniques. Finally, user feedback (as a survey) is used to evaluate the aesthetics of the different models evolved by these fitness metrics. Results show that the models evolved by the proposed 3D specific fitness metrics were rated higher than others by the users. 
In a broader context, this research pushes the boundaries of 3D art generation and evaluation techniques, while also providing insights and opportunities for future in-depth exploration of 3D art generation.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(ruszczak:2024:GECCOcomp2, author = {Bogdan Ruszczak and Michal Myller and Lukasz Tulczyjew and Agata Wijata and Jakub Nalepa}, title = {Particle Swarm Optimization Meets Deep Learning for Estimating {Root-Zone} Soil Moisture from Hyperspectral Images}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {695--698}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {particle swarm optimization, deep learning, hyperspectral imaging, root-zone soil moisture estimation, hyperparameter optimization, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654417}, size = {4 pages}, abstract = {Monitoring crop irrigation levels and the soil moisture in the root zone is of paramount importance in precision agriculture (PA), as it allows practitioners to optimize the water supply. This can, in turn, directly lead to significant water savings while maintaining appropriate cultivation practices. Currently adopted in-field methods to monitor root-zone soil moisture are costly, human-dependent, and unscalable to large spatial areas. We tackle this issue and propose MoleNose---an approach for estimating root-zone soil moisture from hyperspectral images using deep learning models enhanced through particle swarm optimization. Our experimental study, performed over a real-world dataset of hyperspectral images collocated with in-situ soil moisture measurements, revealed that MoleNose significantly outperforms hand-crafted deep learning architectures in various temporal analysis scenarios, in which we estimate the root-zone soil moisture in different time points in the season.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(shyne:2024:GECCOcomp, author = {Fiona Shyne and Kaylah Facey and Seth Cooper}, title = {Generating Solvable and Difficult Logic Grid Puzzles}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {699--702}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {logic puzzles, procedural content generation, FI-2Pop, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654337}, size = {4 pages}, abstract = {Logic grid puzzles are a popular variety of pen-and-paper puzzles. In logic grid puzzles, players are given natural language hints that they use to mark relationships between entities on a grid. We demonstrate the ability of a Feasible-Infeasible Two-Population (FI-2Pop) genetic algorithm to produce high-quality logic grid puzzles. 
Hints are constructed using a hand-authored grammar that represents typical types of hints. Infeasible individuals are evolved toward becoming solvable. Feasible individuals are optimized based on estimated difficulty and hint count. The final evolved puzzles require deductive reasoning skills from the player and were found challenging by the authors of this paper.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(wijata:2024:GECCOcomp, author = {Agata M. Wijata and Bartlomiej Pycinski and Jakub Nalepa}, title = {Selecting Image Features for Biopsy Needle Detection in Ultrasound Images Using Genetic Algorithms}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {703--706}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {core needle biopsy, needle detection, machine learning, ultrasound imaging, feature selection, genetic algorithm, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654434}, size = {4 pages}, abstract = {Locating the biopsy needle in ultrasound (US) images is a crucial task in medical image analysis. It aids clinicians in minimizing the risk of damaging surrounding tissue during a US-guided core needle biopsy and helps reduce its duration. Numerous studies have explored needle segmentation from US images, but most of them operate under the unrealistic assumption that the needle is always present in the image. To address this gap, we propose an approach for detecting the biopsy needle in US images. It couples classic machine learning with a genetic algorithm that identifies the most relevant image features contributing to needle localization. We thus concentrate on the most significant features and prune unnecessary extractors to enhance the efficiency of the pipeline, which is of paramount importance in time-constrained clinical settings.
Our experiments showed that genetically evolved feature subsets allow us to build effective needle detectors outperforming models trained over full feature sets, and they can be flexibly incorporated into cascaded multi-scale detection pipelines.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zhao:2024:GECCOcomp4, author = {Junfeng Zhao and Lixin Tang and Yong Shuai}, title = {Exploring a Small Molecule Property Prediction Model with Optimal Comprehensive Performance through {Multi-Objective} Optimization Algorithms}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Ruhul Sarker and Patrick Siarry}, pages = {707--710}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {machine learning, molecular property prediction, NSDE, MOP, Real World Applications: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654410}, size = {4 pages}, abstract = {The evolution of artificial intelligence has given rise to numerous machine learning (ML) models for predicting the structural properties of materials, expediting the process of new material development. However, many of these models have parameters set based on empirical knowledge, lacking validation for optimality or multi-objective fitness. In this study, we employ a fusion modeling approach that integrates physical mechanisms with ML to construct a neural network model for predicting multiple properties of small molecules. We utilize the non-dominated sorting differential evolution (NSDE) algorithm to explore the dimensions of the network input data and the structure of the network. The goal is to identify feasible solutions that simultaneously optimize the predictive performance of the model and the training time cost. 
Experimental results show that the trained model determined by multi-objective optimization (MOP) not only exhibits outstanding predictive capabilities for multiple properties of small molecules but also achieves a balance between predictive performance and training time cost, showcasing excellent comprehensive performance.}, notes = {GECCO-2024 RWA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(bui:2024:GECCOcomp, author = {Thi-Mai-Anh Bui and Van-Tri Do and Minh Vu Le and Quoc-Trung Bui}, title = {Dynamic Difficulty Coefficient in {Search-Based} Software Testing: Targeting to Hard Branch Coverage}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dominik Sobania and Aymeric Blot}, pages = {711--714}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {software search based testing, constraint based testing, genetic algorithm, automated testcase generation, Search-Based Software Engineering: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654411}, size = {4 pages}, abstract = {Within the domain of Search-Based Software Testing (SBST), there is a growing research emphasis on hard branch coverage during the generation of test data. A considerable portion of this research strives to enhance the fitness function by integrating branch complexity, providing additional insights to fine-tune the search mechanism. However, current studies encounter a limitation as the computation of branch difficulty is straightforward, potentially leading to inefficiency and getting stuck in local optima, especially when dealing with nested predicates. This paper introduces a genetic algorithm designed to tackle test data generation with a specific focus on hard branches. The proposed algorithm aims to identify optimal test-suite candidates, covering as many branches as possible by employing a fitness function dynamically computed based on the difficulty coefficient of branch targets. Additionally, we enhance genetic operators to guide the evolutionary process toward hard branches. 
Empirical studies indicate the efficiency and effectiveness of the proposed approach, significantly outperforming counterparts, especially for covering difficult-to-reach branches.}, notes = {GECCO-2024 SBSE A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(dougherty:2024:GECCOcomp, author = {Ryan Edward Dougherty and Dylan Nathan Green and Hyunmook Kang and Grace Miro Kim and Stephanie Forrest}, title = {A {Multi-Objective} Genetic Algorithm for Location in Interaction Testing}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dominik Sobania and Aymeric Blot}, pages = {715--718}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {locating arrays, combinatorial testing, genetic algorithm, evolutionary computation, Search-Based Software Engineering: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654260}, size = {4 pages}, abstract = {Software testing is a key component of the software engineering process, but modern software is highly complex. Software configurations involve many interacting components and interactions among them can strongly affect the software's behavior in hard-to-predict ways. Combinatorial interaction testing (CIT) concerns the creation of test suites that either detect or locate the most important interactions in a large scale software system. Locating Arrays (LAs) are a data structure that guarantees a unique location for every such set of interactions. In this paper we present LocAG, an algorithm that generates LAs. Our approach uses a simple but powerful "partitioning" method of interactions to greatly reduce the computational cost of verifying a candidate LA. Further, we use evolutionary computation to quickly determine any additional tests after the partitioning method is complete. We are able to generate LAs for larger systems faster, with any desired separation, and greater interaction size than any existing approach.}, notes = {GECCO-2024 SBSE A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(liu:2024:GECCOcomp2, author = {Yueyue Liu and Hongyu Zhang and Van-Hoang Le and Yuantian Miao and Zhiqiang Li}, title = {Local Search-based Approach for Cost-effective Job Assignment on Large Language Models}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dominik Sobania and Aymeric Blot}, pages = {719--722}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {large language models, job assignment, local search, log parsing, Search-Based Software Engineering: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654104}, size = {4 pages}, abstract = {Large Language Models (LLMs) have garnered significant attention due to their impressive capabilities. However, leveraging LLMs can be expensive due to the computational resources required, with costs depending on invocation numbers and input prompt lengths. 
Generally, larger LLMs deliver better performance but at a higher cost. In addition, prompts that provide more guidance to LLMs can increase the probability of correctly processing the job but also tend to be longer, increasing the processing cost. Therefore, selecting an appropriate LLM and prompt template is crucial for achieving an optimal trade-off between cost and performance. This paper formulates the job assignment on LLMs as a multi-objective optimisation problem and proposes a local search-based algorithm, termed LSAP, which aims to minimise the invocations cost while maximising overall performance. First, historical data is used to estimate the accuracy of each job submitted to a candidate LLM with a chosen prompt template. Subsequently, LSAP combines heuristic rules to select an appropriate LLM and prompt template based on the invocation cost and estimated accuracy. Extensive experiments on LLM-based log parsing, a typical software maintenance task that utilizes LLMs, demonstrate that LSAP can efficiently generate solutions with significantly lower cost and higher accuracy compared to the baselines.}, notes = {GECCO-2024 SBSE A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(maliukov:2024:GECCOcomp, author = {Irina Maliukov and Gera Weiss and Oded Margalit and Achiya Elyasaf}, title = {Evolving Assembly Code in an Adversarial Environment}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dominik Sobania and Aymeric Blot}, pages = {723--726}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, assembly, code generation, codeguru xtreme, Search-Based Software Engineering: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654209}, size = {4 pages}, abstract = {We evolve survivors for the CodeGuru competition --- assembly programs that run the longest in shared memory, by resisting attacks from adversary survivors and finding their weaknesses. For evolving top-notch solvers, we specify a Backus Normal Form (BNF) for the assembly language and synthesize the code from scratch using Genetic Programming (GP). We evaluate the survivors by running CodeGuru games against human-written winning survivors. Our evolved programs found weaknesses in the programs they were trained against and utilized them. This work has important applications for cyber-security, as we utilize evolution to detect weaknesses in survivors. The assembly BNF is domain-independent; thus, by modifying the fitness function, it can detect code weaknesses and help fix them. Finally, the CodeGuru competition offers a novel platform for analyzing GP and code evolution in adversarial environments. 
To support further research in this direction, we provide a thorough qualitative analysis of the evolved survivors and the weaknesses found.}, notes = {GECCO-2024 SBSE A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(saemundsdottir:2024:GECCOcomp, author = {Gudny Birna Saemundsdottir and Saemundur Oskar Haraldsson}, title = {Large Language Models as All-in-one Operators for Genetic Improvement}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dominik Sobania and Aymeric Blot}, pages = {727--730}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic improvement, program repair, search based software engineering, large language models, recombination operators, Search-Based Software Engineering: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654408}, size = {4 pages}, abstract = {Due to their versatility and increasing popularity, Large Language Models (LLMs) can offer exciting new research avenues in most fields of science and technology. This paper describes a proof-of-concept experiment on the applicability of LLMs as a recombination operator for Genetic Improvement (GI). A simple GI search was applied to two faulty Python functions that are generally considered appropriate exercises for beginner programmers. The initial generation of programs was seeded with a set of faulty versions, and then GPT-3.5 Turbo from OpenAI was used, with minimal prompting, as the only recombination operator to generate subsequent generations. Several interesting observations were made that will guide further investigations into the use of LLMs in GI. Key among those was that it seems LLMs will need substantial and highly optimised prompting to be effective standalone GI operators. The LLM did not recognise logic errors, but when the search was seeded with syntax errors, the performance was significantly better. An important feature of generative AI that was observed and should be exploited in future work is its ability to synthesise mutations even if the prompt did not ask for them.}, notes = {GECCO-2024 SBSE A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(semujju:2024:GECCOcomp, author = {Stuart Semujju and Fangqing Liu and Han Huang and Yi Xiang}, title = {Enhancing Fault Detection in Smart Contract Loops Through Adaptive Probabilistic Sampling}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Dominik Sobania and Aymeric Blot}, pages = {731--734}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {smart contracts, mutation testing, fault-detection, automated test case generation, Search-Based Software Engineering: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654283}, size = {4 pages}, abstract = {Smart contracts are programs that reside on a blockchain.
A key feature of smart contracts is their immutability, meaning that they cannot be modified once they are deployed. Despite existing efforts to uncover vulnerabilities, a common assumption is that loop structures rarely occur in smart contracts. Traditional search-based algorithms encounter challenges in uniform exploration, particularly in complex control flow paths where certain paths are more critical than others for fault detection. To overcome this challenge, we propose an adaptive probabilistic sampling strategy that allows for targeted exploration of critical paths. The proposed adaptive probabilistic sampling strategy empowers search-based algorithms to select individuals covering paths with defects. Experimental results demonstrate the efficacy of the proposed adaptive probabilistic sampling strategy in ensuring targeted exploration of critical paths within smart contract loops and enhanced mutant-killing capabilities when combined with search-based algorithms.}, notes = {GECCO-2024 SBSE A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(bala:2024:GECCOcomp, author = {Indu Bala and Dikshit Chauhan and Lewis Mitchell}, title = {Orthogonally Initiated Particle Swarm Optimization with Advanced Mutation for {Real-Parameter} Optimization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Christian Blum and Paola Pellegrini}, pages = {735--738}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {particle swarm optimization, swarm intelligence, metaheuristic optimization, orthogonal initialization: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654214}, size = {4 pages}, abstract = {This article introduces an enhanced particle swarm optimizer (PSO), termed Orthogonal PSO with Mutation (OPSO-m). Initially, it proposes an orthogonal array-based learning approach to cultivate an improved initial swarm for PSO, significantly boosting the adaptability of swarm-based optimization algorithms. The article further presents archive-based learning strategies, dividing the population into regular and elite sub-groups. Each subgroup employs distinct learning mechanisms. The regular group utilizes efficient learning schemes derived from three unique archives, which categorize individuals based on their quality levels. Additionally, a mutation strategy is implemented to update the positions of elite individuals. Comparative studies are conducted to assess the effectiveness of these learning strategies in OPSO-m, evaluating its optimization capacity through exploration-exploitation dynamics and population diversity analysis. The proposed OPSO-m model is tested on real-parameter challenges from the CEC 2017 suite in various dimensional search spaces. 
OPSO-m exhibits distinguished performance in the precision of solutions, rapidity of convergence, efficiency in search, and robust stability, thus highlighting its superior aptitude for solving optimization problems.}, notes = {GECCO-2024 SI A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(dziwinski:2024:GECCOcomp, author = {Piotr Dziwinski}, title = {A new Hybrid Parameter Independent Particle Swarm Optimization Algorithm}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Christian Blum and Paola Pellegrini}, pages = {739--742}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {particle swarm optimization, genetic algorithms, hybridization, parameter tuning and algorithm configuration, benchmark problems, Swarm Intelligence: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654345}, size = {4 pages}, abstract = {The Particle Swarm Optimization (PSO) algorithm is a well-known and widely used technique for solving complex optimization problems, often providing very good results. However, precise parameter selection or auto-adaptation mechanisms are crucial for ensuring the optimal performance of the algorithm. Unfortunately, adjusting the parameter values is difficult due to their mutual dependence. To address these limitations of traditional PSO, a new algorithm called Hybrid Parameter Independent Particle Swarm Optimization has been proposed. This new algorithm introduces a novel idea for calculating velocity with independent parameters. These parameters independently influence three basic behaviors of individuals: social or cognitive behavior only, the influence of the velocity from the previous step, and the rate of decrease in movement speed. Moreover, the introduced genetic operators significantly support the searching process and extend the range of the parameters for which the algorithm has good performance. The experiments conducted on chosen functions have shown how the novel formula streamlines parameter selection and elucidates the genetic operators' influence on particle movement efficiency.
Furthermore, the experiments on established benchmark functions confirm the efficacy of the proposed approach compared to various PSO algorithm modifications and contemporary swarm algorithms.}, notes = {GECCO-2024 SI A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(fukuhara:2024:GECCOcomp, author = {So Fukuhara and Masao Arakawa}, title = {Particle Swarm Optimization with Unfeasible Personal Best for Constrained Problems}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Christian Blum and Paola Pellegrini}, pages = {743--746}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {particle swarm optimization, swarm intelligence, heuristics, benchmark problem, constraint handling: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654309}, size = {4 pages}, abstract = {There are many requirements and issues in engineering applications, and most real-world optimization problems are constrained to take them into account. In recent years, various constraint-handling PSOs have been developed and widely used for their power and versatility to solve those problems. The most widely adopted approach, owing to its simplicity and ease of implementation, is the penalty function method, which converts the problem into an unconstrained one. However, despite the popularity of penalty function methods, they have numerous limitations. Notably, they are not a good choice when the optimal solution lies on the boundary between the feasible and infeasible regions, which occurs in many constrained problems. In this study, we propose the following ideas to extend the efficacy of PSO-based penalty function methods: a new setting for the personal best, and explosion. The personal best setting uses an unfeasible solution with good fitness as the personal best, enabling PSO to find optimal solutions that lie at the border of the feasible region faster. The explosion is a kind of mutation, as in genetic algorithms.
The proposed method was tested on well-known constrained problems, and its advantage is shown by comparison with the results of other constraint-handling PSOs.}, notes = {GECCO-2024 SI A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(li:2024:GECCOcomp4, author = {Xu Li and Min Cao and Jian-Yu Li and Jing Xu and Jun Zhang and Zhi-Hui Zhan}, title = {{Matrix-Based} Ant Colony Optimization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Christian Blum and Paola Pellegrini}, pages = {747--750}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {ant colony optimization, matrix-based evolutionary computation, parallel computing, Swarm Intelligence: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654174}, size = {4 pages}, abstract = {The ant colony optimization (ACO) algorithm, a metaheuristic approach in the evolutionary computation (EC) community, has achieved remarkable success in various fields. However, traditional ACO algorithms face the challenge of long running times when solving large-scale problems. Recently, a novel matrix-based EC (MEC) framework has been proposed to improve the computational efficiency of EC algorithms via matrix-based operations, which has potential for solving large-scale optimization problems efficiently. However, as ACO typically performs route construction and pheromone update sequentially, accelerating ACO with matrix-based operations is challenging. To this end, we propose matrix-based operators for route construction and pheromone update in ACO. The core idea involves two key aspects. Firstly, we propose a matrix-based representation for ant routes, pheromones, and heuristic information. Secondly, we utilize matrix operations to parallelize computation and reduce computational time. Based on the above, we apply the matrix-based operators to classical ACO to propose a matrix-based ACO (MACO). The experimental results demonstrate that, compared to the original version, the proposed MACO significantly enhances the efficiency of the algorithm while maintaining its problem-solving ability.}, notes = {GECCO-2024 SI A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(islam:2024:GECCOcomp, author = {Md. Shahrukh Islam and Somonindro Roy and Md Adnan Arefeen and Muhammad Ali Nayeem}, title = {{ClientShield:} Malicious Clients Avoidance in Federated Learning with the Help of an Ant}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Christian Blum and Paola Pellegrini}, pages = {751--754}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {Swarm Intelligence: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654435}, size = {4 pages}, abstract = {Federated learning (FL) has garnered significant attention for its potential to enable collaborative model training across decentralized devices while preserving data privacy.
However, the presence of some malicious clients, who deliberately add random noise to their local weights, seriously hampers the overall training process. We address this challenge by introducing a specialized client selection problem and proposing the ClientShield algorithm, which subsamples a fraction of honest clients based on the pheromone profile maintained by the Ant System. It employs a reinforcement phase to reward clients demonstrating improved accuracy while penalizing malevolent clients selectively. Our approach significantly outperforms random client selection methods, achieving approximately 60\% accuracy in the presence of 10\% malicious clients, compared to a mere 10\% accuracy with random selection. This research contributes to the advancement of FL, enhancing its applicability and security in real-world scenarios.}, notes = {GECCO-2024 SI A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(yang:2024:GECCOcomp3, author = {Luming Yang and Tao Jiang and Ran Cheng}, title = {Tensorized Ant Colony Optimization for {GPU} Acceleration}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Christian Blum and Paola Pellegrini}, pages = {755--758}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {tensorization, ACO, GPU, adaptive independent roulette, TSP, Swarm Intelligence: Poster}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3654394}, size = {4 pages}, abstract = {Ant Colony Optimization (ACO) is renowned for its effectiveness in solving Traveling Salesman Problems, yet it faces computational challenges in CPU-based environments, particularly with large-scale instances. In response, we introduce a Tensorized Ant Colony Optimization (TensorACO) to utilize the advancements of GPU acceleration. As the core, TensorACO fully transforms ant system and ant path into tensor forms, a process we refer to as tensorization. For the tensorization of ant system, we propose a preprocessing method to reduce the computational overhead by calculating the probability transition matrix. In the tensorization of ant path, we propose an index mapping method to accelerate the update of pheromone matrix by replacing the mechanism of sequential path update with parallel matrix operations. Additionally, we introduce an Adaptive Independent Roulette (AdaIR) method to overcome the challenges of parallelizing ACO's selection mechanism on GPUs. Comprehensive experiments demonstrate the superior performance of TensorACO achieving up to 1921\texttimes{} speedup over standard ACO. Moreover, the AdaIR method further improves TensorACO's convergence speed by 80\% and solution quality by 2\%. 
Source codes are available at https://github.com/EMI-Group/tensoraco.}, notes = {GECCO-2024 SI A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(banzhaf:2024:GECCOcomp, author = {Wolfgang Banzhaf and Ting Hu}, title = {Linear Genetic Programming}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {759--771}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {genetic algorithms, genetic programming}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648422}, size = {13 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(branke:2024:GECCOcomp, author = {Juergen Branke and Sebastian Rojas-Gonzalez and Ivo Couckuyt}, title = {Bayesian Optimization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {772--790}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648403}, size = {19 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(doerr:2024:GECCOcomp2, author = {Carola Doerr and Diederick Vermetten and Jacob {de Nobel} and Thomas Baeck}, title = {Benchmarking and Analyzing Iterative Optimization Heuristics with {IOHprofiler}}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {791--799}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648404}, size = {9 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(doerr:2024:GECCOcomp3, author = {Benjamin Doerr}, title = {A Gentle Introduction to Theory (for {Non-Theoreticians)}}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {800--829}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648402}, size = {30 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic 
Programming Conference (GP)}, ) @inproceedings(eiben:2024:GECCOcomp, author = {Agoston E. Eiben}, title = {Robot Evolution: from Virtual to Real}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {830--855}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648427}, size = {26 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(flageat:2024:GECCOcomp, author = {Manon Flageat and Bryan Lim and Antoine Cully}, title = {Evolutionary Reinforcement Learning}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {856--882}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648416}, size = {27 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(ishibuchi:2024:GECCOcomp, author = {Hisao Ishibuchi and Lie Meng Pang and Ke Shang}, title = {New Framework of {Multi-Objective} Evolutionary Algorithms with Unbounded External Archive}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {883--902}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648424}, size = {20 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(lehre:2024:GECCOcomp, author = {Per Kristian Lehre}, title = {Runtime Analysis of Population-based Evolutionary Algorithms}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {903--927}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648428}, size = {25 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(miikkulainen:2024:GECCOcomp, author = {Risto Miikkulainen}, title = {Evolution of Neural Networks}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = 
{2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {928--945}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648407}, size = {18 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(moraglio:2024:GECCOcomp, author = {Alberto Moraglio and Francisco Chicano}, title = {Introduction to Quantum Optimization}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {946--972}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648420}, size = {27 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(o'reilly:2024:GECCOcomp, author = {Una-May O'Reilly and Erik Hemberg}, title = {Using Large Language Models for Evolutionary Search}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {973--983}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648432}, size = {11 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(ochoa:2024:GECCOcomp, author = {Gabriela Ochoa and Katherine Malan}, title = {Landscape Analysis of Optimisation Problems and Algorithms}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {984--1004}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648421}, size = {21 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(pillay:2024:GECCOcomp, author = {Nelishia Pillay}, title = {Transfer Learning in Evolutionary Spaces}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1005--1016}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = 
{}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648431}, size = {12 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(rothlauf:2024:GECCOcomp, author = {Franz Rothlauf}, title = {Representations for Evolutionary Algorithms}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1017--1037}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648426}, size = {21 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(siddique:2024:GECCOcomp, author = {Abubakar Siddique and Will N. Browne and Ryan J. Urbanowicz}, title = {Evolutionary Machine Learning for Interpretable and eXplainable {AI}}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1038--1068}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3658378}, size = {31 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(tauritz:2024:GECCOcomp, author = {Daniel R. Tauritz and John R. Woodward}, title = {Generative Hyper-heuristics}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1069--1095}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648417}, size = {27 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(thierens:2024:GECCOcomp, author = {Dirk Thierens and Peter A. N. 
Bosman}, title = {{Model-Based} Evolutionary Algorithms}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1096--1126}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648423}, size = {31 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(whitley:2024:GECCOcomp, author = {L. Darrell Whitley}, title = {Next Generation Genetic Algorithms - efficient crossover and local search and new results on crossover lattices}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1127--1157}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648425}, size = {31 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(xue:2024:GECCOcomp, author = {Bing Xue and Mengjie Zhang}, title = {Evolutionary Computation for Feature Selection and Feature Construction}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1158--1186}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648411}, size = {29 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(yazdani:2024:GECCOcomp, author = {Danial Yazdani and Xin Yao}, title = {A Deep Dive into Robust Optimization Over Time: Problems, Algorithms, and Beyond}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1187--1196}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648401}, size = {10 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(brownlee:2024:GECCOcomp, author = {Alexander Edward Ian Brownlee and Saemundur Oskar Haraldsson and John Robert Woodward and Markus Wagner}, title = {Genetic Improvement: Taking real-world source code and improving it using computational search methods}, 
booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1197--1230}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {genetic improvement}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648418}, size = {34 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(bi:2024:GECCOcomp, author = {Ying Bi and Stefano Cagnoni and Yanan Sun}, title = {Evolutionary Computation and Evolutionary Deep Learning for Image Analysis, Signal Processing and Pattern Recognition}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1231--1260}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648410}, size = {30 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(coello-coello:2024:GECCOcomp, author = {Carlos Artemio {Coello Coello}}, title = {{Constraint-Handling} Techniques used with Evolutionary Algorithms}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1261--1283}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648406}, size = {23 pages}, abstract = {Evolutionary Algorithms (EAs) have been found successful in the solution of a wide variety of optimization problems. However, EAs are unconstrained search techniques. 
Thus, incorporating constraints into the fitness function of an EA is an open research area.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(deb:2024:GECCOcomp, author = {Kalyanmoy Deb and Ankur Sinha}, title = {Evolutionary Bilevel Optimization: Algorithms and Applications}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1284--1312}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648409}, size = {29 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(eftimov:2024:GECCOcomp, author = {Tome Eftimov and Peter Korosec}, title = {Statistical Analyses for Single-objective Stochastic Optimization Algorithms}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1313--1327}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664202}, size = {15 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(mei:2024:GECCOcomp, author = {Yi Mei and Guenther Raidl}, title = {Evolutionary Computation meets Machine Learning for Combinatorial Optimisation}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1328--1351}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648413}, size = {24 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(neumann:2024:GECCOcomp, author = {Frank Neumann and Aneta Neumann and Hemant Kumar Singh}, title = {Evolutionary computation for stochastic problems}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1352--1368}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648412}, size = {17 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and 
the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(smith-miles:2024:GECCOcomp, author = {Kate Smith-Miles and Mario Andres Munoz and Sevvandi Kandanaarachchi}, title = {Instance Space Analysis and Item Response Theory for Algorithm Testing}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1369--1390}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648419}, size = {22 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(sudholt:2024:GECCOcomp, author = {Dirk Sudholt and Giovanni Squillero}, title = {Theory and Practice of Population Diversity in Evolutionary Computation}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1391--1409}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648430}, size = {19 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(toutouh:2024:GECCOcomp, author = {Jamal Toutouh and Una-May O'Reilly}, title = {Coevolutionary Computation for Adversarial Deep Learning}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1410--1431}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648405}, size = {22 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(knowles:2024:GECCOcomp, author = {Joshua Knowles and Weijie Zheng}, title = {Evolutionary Multiobjective Optimization {(EMO)}}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1432--1459}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648415}, size = {28 pages}, abstract = {Evolutionary Multiobjective Optimization (EMO) is the commonly used term for the study and development of evolutionary algorithms to tackle optimization problems with at least two conflicting optimization objectives. 
The first methods were proposed in the 1980s, and the field has gradually emerged as one of the most innovative and popular areas of evolutionary computation, with a reach extending far beyond its niche beginnings. Today, EMO methods are frequently developed and adopted by researchers from other areas of optimization and decision making, and are put to use in a wealth of applications. This tutorial will be a fresh look at the current state of EMO suitable for those new to the field and those who are experienced but wish to keep up-to-date with a selected tour of the latest ideas, theory, and applications. We will begin with a gentle introduction to the fundamental ideas, but will neglect a comprehensive history in order to spend more time on the most surprising and most secure results, and interesting research lines that still need further deep exploration. 1. Why multiobjective? There are several motivating reasons often neglected. 2. Why evolutionary? We will re-examine the usual population-based justification. 3. Elitism and archiving, from basics to the latest synthesis of theoretical results. 4. NSGA-II: reflections on a behemoth, and new theoretical results. 5. Performance assessment and benchmarking best practices. 6. How many objectives? Why decreasing and increasing number of objectives can both work (surveying objective reduction and multi-objectivization). 7. Decomposition and cooperative problem solving. 8. When and how to include the elusive decision maker (DM), including visualization, and replacing the human decision maker with machines. 9. Asynchronous EMO methods (for objectives of differing latency). 10. Automatic design and tuning of EMO algorithms. The tutorial will not include a comprehensive survey of applications, but selected interesting applications will serve to reflect on the challenges in the use of EMO in practice.}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(machado:2024:GECCOcomp, author = {Penousal Machado and Joao Correia}, title = {Evolutionary Art and Design in the Machine Learning Era}, booktitle = {Proceedings of the 2024 Genetic and Evolutionary Computation Conference Companion}, year = {2024}, editor = {Mengjie Zhang and Emma Hart}, pages = {1460--1501}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {Tutorial}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3648408}, size = {42 pages}, abstract = {}, notes = {GECCO-2024 A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(mitran:2024:GECCOcomp, author = {Madalina Andreea Mitran}, title = {Correlation-based Analysis of the Influence of Bound Constraint Handling Methods on Population Dynamics in Differential Evolution}, booktitle = {Analysing algorithmic behaviour of optimisation heuristics}, year = {2024}, editor = {Anna V Kononova and Daniela Zaharie and Fabio Caraffini and Thomas Baeck}, pages = {1502--1510}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {LSHADE, correlation analysis, bound constraint handling methods, population
bounding box}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664180}, size = {9 pages}, abstract = {This study explores the behavior of the population within the LSHADE algorithm in the context of bound-constrained optimization by examining a set of measures that quantify population-related performance, control parameters, and geometric characteristics. The geometric characteristics are computed using the population bounding box (PBB) concept. Furthermore, this study investigates the correlations between those measures, aiming to uncover potential relationships and dependencies.}, notes = {GECCO-2024 AABOH A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(pluhacek:2024:GECCOcomp, author = {Michal Pluhacek and Adam Viktorin and Tomas Kadavy and Jozef Kovac and Roman Senkerik}, title = {Measuring Population Diversity in Variable Dimension Search Spaces}, booktitle = {Analysing algorithmic behaviour of optimisation heuristics}, year = {2024}, editor = {Anna V Kononova and Daniela Zaharie and Fabio Caraffini and Thomas Baeck}, pages = {1511--1519}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {variable dimension, diversity, evolutionary computation}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664170}, size = {9 pages}, abstract = {Measuring diversity in evolutionary algorithms presents a complex challenge, especially in optimization tasks with variable dimensionality. Current literature offers limited insights on effectively quantifying diversity under these conditions. This paper addresses this gap by evaluating the effectiveness of conventional diversity measures in variable dimension contexts and identifying their limitations. We introduce a novel diversity measurement approach tailored to these dynamic environments. Our method comprehensively captures both the structural and parametric diversity of populations, providing a more nuanced understanding of diversity changes over time. Through a series of experimental scenarios, we demonstrate that our proposed measure effectively tracks the evolution of diversity in populations with variable dimensions.}, notes = {GECCO-2024 AABOH A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(qiao:2024:GECCOcomp, author = {Yukai Qiao and Marcus Gallagher}, title = {Analyzing the Runtime of the Gene-pool Optimal Mixing Evolutionary Algorithm {(GOMEA)} on the Concatenated Trap Function}, booktitle = {Analysing algorithmic behaviour of optimisation heuristics}, year = {2024}, editor = {Anna V Kononova and Daniela Zaharie and Fabio Caraffini and Thomas Baeck}, pages = {1520--1526}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary algorithm, recombination, runtime analysis}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664158}, size = {7 pages}, abstract = {The Gene-pool Optimal Mixing Evolutionary Algorithm (GOMEA) is a state-of-the-art evolutionary algorithm that leverages linkage learning to efficiently exploit problem structure.
By identifying and preserving important building blocks during variation, GOMEA has shown promising performance on various optimization problems. In this paper, we provide the first runtime analysis of GOMEA on the concatenated trap function, a challenging benchmark problem that consists of multiple deceptive subfunctions. We derive an upper bound on the expected runtime of GOMEA with a truthful linkage model, showing that it can solve the problem in O(m^3 2^k) with high probability, where m is the number of subfunctions and k is the subfunction length. This is a significant speedup compared to the (1+1) EA, which requires O(ln(m) (mk)^k) expected evaluations.}, notes = {GECCO-2024 AABOH A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(frazier:2024:GECCOcomp, author = {James Gunder Frazier and Thomas Helmuth}, title = {Explaining Automatically Designed Software Defined Perimeters with a Two Phase Evolutionary Computation System}, booktitle = {Evolutionary Computation and Decision Making}, year = {2024}, editor = {Tinkle Chugh and Richard Allmendinger}, pages = {1527--1535}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, evolutionary computation, decision making, cybersecurity, networks}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664155}, size = {9 pages}, abstract = {Software Defined Perimeter (SDP) is a zero-trust network-isolation defense technique which aims to limit security risks by giving dynamic account type assignments to network users. Despite SDP being proven as an effective defense strategy in various domains, it has yet to see widespread use due to its drawbacks. One of SDP's most pressing issues is the need for an expert to manually configure it for each unique application. Here we describe a novel system for designing SDP networks, called SDPush, which can automatically design and analyze possible configurations for a given network with user specifications. Since there is no systematic approach for account type design and assignment, we develop a two-step optimization system consisting of a bitstring genetic algorithm and a genetic programming sub-system for designing and evaluating SDP networks, respectively. In order to evolve an SDP configuration exhibiting the user-specified characteristics while also minimizing security risk, we implement our system to support multi-objective search spaces by providing the system's training set with different cases aimed at evaluating different aspects of the network configuration.
We present initial results of experiments on networks of varying size and characteristic requirements.}, notes = {GECCO-2024 EC+DM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(guo:2024:GECCOcomp2, author = {Zhiye Guo and Jingxuan Wei and Binhao Liang and Fengqin Liang}, title = {Dynamic Constrained Multiobjective Algorithm Based on Feasible Region Prediction}, booktitle = {Evolutionary Computation and Decision Making}, year = {2024}, editor = {Tinkle Chugh and Richard Allmendinger}, pages = {1536--1543}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {dynamic optimization, time-varying constraints, multi-objective}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664110}, size = {8 pages}, abstract = {Being prevalent in real-world scenarios, dynamic constrained multiobjective optimization problems (DCMOP) are hard to solve due to their continuously and slowly variable objectives and constraints. Although research on dynamic multi-objective optimization algorithms has been popular in recent years, most existing algorithms do not take dynamic constraints into account. A feasible region prediction strategy (FRP) is proposed to handle multiple constraint changes. Firstly, boundary solutions are used to construct predictors, which will track the feasible region. Secondly, when drastic constraint changes are detected, a series of predictors will be integrated to explore the moving trend of feasible regions. The proposed algorithm is tested on 8 dynamic problems and compared with other algorithms. The experimental results show that the proposed algorithm has significant advantages in predictive solutions compared with other algorithms. This proves that FRP is able to search the feasible region quickly while consuming fewer resources.}, notes = {GECCO-2024 EC+DM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(de-sa:2024:GECCOcomp, author = {Alex G. C. {de Sa} and David B. Ascher}, title = {Towards Evolutionary-based Automated Machine Learning for Small Molecule Pharmacokinetic Prediction}, booktitle = {14th Workshop on Evolutionary Computation for the Automated Design of Algorithms}, year = {2024}, editor = {Daniel Tauritz Auburn and John R. Woodward}, pages = {1544--1553}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, AutoML, bio(chem)informatics, grammar-based genetic programming, small molecules, pharmacokinetics}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664166}, size = {10 pages}, abstract = {Machine learning (ML) is revolutionising drug discovery by expediting the prediction of small molecule properties essential for developing new drugs. These properties - including absorption, distribution, metabolism and excretion (ADME) - are crucial in the early stages of drug development since they provide an understanding of the course of the drug in the organism, i.e., the drug's pharmacokinetics.
However, existing methods lack personalisation and rely on manually crafted ML algorithms or pipelines, which can introduce inefficiencies and biases into the process. To address these challenges, we propose a novel evolutionary-based automated ML (AutoML) method specifically designed for predicting small molecule properties, with a particular focus on pharmacokinetics. Leveraging the advantages of grammar-based genetic programming, our AutoML method streamlines the process by automatically selecting algorithms and designing predictive pipelines tailored to the particular characteristics of input molecular data. Results demonstrate AutoML's effectiveness in selecting diverse ML algorithms, resulting in comparable or even improved predictive performances compared to conventional approaches. By offering personalised ML-driven pipelines, our method promises to enhance small molecule research in drug discovery, providing researchers with a valuable tool for accelerating the development of novel therapeutic drugs.}, notes = {GECCO-2024 ECADA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(gerber:2024:GECCOcomp, author = {Mia Gerber and Nelishia Pillay}, title = {Dynamic Neural Architecture Search for Image Classification}, booktitle = {14th Workshop on Evolutionary Computation for the Automated Design of Algorithms}, year = {2024}, editor = {Daniel Tauritz Auburn and John R. Woodward}, pages = {1554--1562}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {neural architecture search, genetic algorithms, image classification, dynamic neural architecture search}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664117}, size = {9 pages}, abstract = {Neural architecture search (NAS) has proven to be effective at automating the design of neural network architectures and hyperparameters. The design produced by NAS is generated offline and remains the same throughout the learning process of the neural network. We refer to this as static neural architecture search (SNAS). We hypothesise that changing the design and design options at different epochs of the learning process will be more effective than using the same design throughout the process. This study investigates this hypothesis by introducing the concept of dynamic neural architecture search (DNAS). This research forms part of a larger initiative investigating changing designs in real time based on feedback on the progression of the search in the space using the design. However, this study focuses on just the first aspect of changing the designs in real time, which we refer to as dynamic neural architecture search. A genetic algorithm is used in both SNAS and DNAS to generate designs comprising the architecture and hyperparameters. The performance of SNAS and DNAS is evaluated on the MNIST, CIFAR-10, CIFAR-100, Celebrity Faces, Movie Success, Mosquito, Melanoma and FruitsGB datasets. For all datasets, DNAS was found to produce better accuracies than SNAS, with a minimal increase in runtimes, at the 95\% level of confidence.
Furthermore, the results were found to be comparable to state-of-the-art approaches, with DNAS producing an improvement over the best known results for the Melanoma dataset.}, notes = {GECCO-2024 ECADA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(xia:2024:GECCOcomp, author = {Pengfei Xia and Ziqiang Li and Bin Li}, title = {Tightening the Approximation Error of Adversarial Risk with Auto Loss Function Search}, booktitle = {14th Workshop on Evolutionary Computation for the Automated Design of Algorithms}, year = {2024}, editor = {Daniel Tauritz Auburn and John R. Woodward}, pages = {1563--1572}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {deep neural networks, adversarial examples, adversarial risk, approximation error, auto loss function search}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664104}, size = {10 pages}, abstract = {How to accurately evaluate the adversarial robustness of Deep Neural Networks (DNNs) is critical for their deployment in real-world applications. An ideal indicator of robustness is adversarial risk. Unfortunately, since it involves maximizing the 0--1 loss, calculating the true risk is technically intractable. The most common solution for this is to compute an approximate risk by replacing the 0--1 loss with a surrogate, such as Cross-Entropy loss. However, these functions are all manually designed and may not be well suited for adversarial robustness evaluation. In this paper, we leverage AutoML to tighten the gap between the true and approximate risks. First, AutoLoss-AR, the first method to search for surrogate losses for adversarial risk, is proposed. The experimental results on 10 adversarially trained models demonstrate the effectiveness of the proposed method: the risks evaluated using the best-discovered losses are 0.2\% to 1.6\% better than those evaluated with baselines. Second, 5 surrogate losses with clean and readable formulas are distilled out and tested on 7 unseen adversarially trained models. These losses outperform the baselines by 0.8\% to 2.4\%, indicating that they can be used individually as some kind of new knowledge. Our code is publicly available at https://github.com/xpf/Tightening-Approximation-Error.}, notes = {GECCO-2024 ECADA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(adair:2024:GECCOcomp, author = {Jason Adair and Sarah L Thomson and Alexander E. I.
Brownlee}, title = {Explaining evolutionary feature selection via local optima networks}, booktitle = {Evolutionary Computation and Explainable AI}, year = {2024}, editor = {John McCall and Jaume Bacardit and Giovanni Iacca}, pages = {1573--1581}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {fitness landscapes, explainable AI, local optima networks (LONs), XAI}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664183}, size = {9 pages}, abstract = {We analyse fitness landscapes of evolutionary feature selection to obtain information about feature importance in supervised machine learning. Local optima networks (LONs) are a compact representation of a landscape, and can potentially be adapted for use in explainable artificial intelligence (XAI). This work examines their applicability for discerning feature importance in supervised machine learning datasets. We visualise aspects of feature selection LONs for a breast cancer prediction dataset as a case study, and this process reveals information about the composition of feature sets for the underlying ML models. The estimations of feature importance obtained from LONs are compared with the coefficients extracted from logistic regression models (interpretable AI), and also against feature importances obtained through an established XAI technique: SHAP (explainable AI). We find that the features present in the LON are not strongly correlated with the model coefficients and SHAP values derived from a model trained prior to feature selection, nor are they strongly correlated within similar groups of local optima after feature selection, calling into question the effects of constraining the feature space for wrapper-based techniques based on such ranking metrics.}, notes = {GECCO-2024 ECXAI A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(jakubik:2024:GECCOcomp, author = {Jan Jakubik and Halina Kwasnicka}, title = {Drawing Attributions From Evolved Counterfactuals}, booktitle = {Evolutionary Computation and Explainable AI}, year = {2024}, editor = {John McCall and Jaume Bacardit and Giovanni Iacca}, pages = {1582--1589}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {machine learning, multi-objective optimization, explainable artificial intelligence, genetic algorithms, XAI}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664122}, size = {8 pages}, abstract = {eXplainable Artificial Intelligence (XAI) has grown in popularity in recent years due to the great demand for black-box machine learning models, particularly deep neural networks. With no universal definition of what constitutes an explanation, Counterfactual Explanations and Feature Attributions emerged as the two most popular explanation modes in the XAI literature. This paper proposes an evolutionary feature attribution method based on the connection between counterfactuals and attributions.
We define counterfactual search as a Multi-Objective Optimization problem, solve it with the evolutionary NSGA-III algorithm and then demonstrate that meaningful attributions can be derived from the makeup of the evolved population of solutions. The resulting attribution method can effectively be applied to both tabular and image data and achieves results comparable with gradient-based model-dependent methods while remaining fully model-agnostic. On image datasets, the unique advantage of this approach is its lack of reliance on precomputed image segmentation, which model-agnostic methods typically require.}, notes = {GECCO-2024 ECXAI A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(lipinski:2024:GECCOcomp, author = {Piotr Lipinski and Klaudia Balcer}, title = {Explaining Session-based Recommendations using Grammatical Evolution}, booktitle = {Evolutionary Computation and Explainable AI}, year = {2024}, editor = {John McCall and Jaume Bacardit and Giovanni Iacca}, pages = {1590--1597}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, grammatical evolution, explainable artificial intelligence, evolutionary algorithms, recommender systems, session-based recommender systems, latent vector representations, XAI}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664156}, size = {8 pages}, abstract = {This paper concerns explaining session-based recommendations using Grammatical Evolution. A session-based recommender system processes a given sequence of products browsed by a user and suggests the most relevant next product to display to the user. State-of-the-art session-based recommender systems are often a type of deep learning black box, so explaining their results is a challenge. In this paper, we propose an approach in which a grammatical expression provides explanations of recommendations generated by session-based recommender systems, together with an evolutionary algorithm, GE-XAI-SBRS, based on Grammatical Evolution with its own initialization and crossover operators, to construct such a grammatical expression.
Our approach uses latent product representations, so-called vector embeddings, which are generated by the recommender systems and provide some additional knowledge on dependencies between products. Computational experiments on the YooChoose dataset, one of the most popular session-based benchmarks, and on the recommendations generated by the Target Attentive Graph Neural Network (TAGNN) model confirm the usefulness of the proposed approach and the efficiency of the proposed algorithm, which outperforms the regular GE algorithm in the task under consideration.}, notes = {GECCO-2024 ECXAI A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(valeriano:2024:GECCOcomp, author = {Maria Gabriela Valeriano and Joao Luiz Junho Pereira and Carlos Roberto {Veiga Kiffer} and Ana Carolina Lorena}, title = {Explaining instances in the health domain based on the exploration of a dataset's hardness embedding}, booktitle = {Evolutionary Computation and Explainable AI}, year = {2024}, editor = {John McCall and Jaume Bacardit and Giovanni Iacca}, pages = {1598--1606}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {machine learning, instance hardness, data-centric AI, healthcare, XAI}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664113}, size = {9 pages}, abstract = {One alternative to explain the behavior of Machine Learning models in a dataset consists of identifying representative samples and inspecting their characteristics. But how can these representative samples be chosen? This paper uses a selection based on the perspective of instance hardness. Instance hardness is the difficulty level in predicting an instance's label in a dataset, regardless of the classification model considered. A hardness embedding of a dataset where the instances are distributed according to their hardness level supports this selection. A metaheuristic is responsible for choosing instances with the best coverage of such hardness embedding.
Applications in the health domain illustrate the usefulness of this approach for contrasting instances of different hardness profiles and obtaining insights about data that can help bridge the communication gap between health specialists and Machine Learning practitioners.}, notes = {GECCO-2024 ECXAI A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(le-goff:2024:GECCOcomp, author = {Leni {Le Goff} and Emma Hart}, title = {Improving Efficiency of Evolving Robot Designs via {Self-Adaptive} Learning Cycles and an Asynchronous Architecture}, booktitle = {Embodied and Evolved Artificial Intelligence}, year = {2024}, editor = {Yue Xie and David Howard and Fumiya Iida and Josie Hughes}, pages = {1607--1615}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary robotics, morpho-evolution, stopping criteria, asynchronous evolution, EI}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664116}, size = {9 pages}, abstract = {Algorithmic frameworks for the joint optimisation of a robot's design and controller often utilise a learning loop nested within an evolutionary algorithm to refine the controller associated with a newly generated robot design. Intuitively, it is reasonable to assume that the length of the learning period required is directly related to the complexity of the new design. Therefore, we propose a novel self-adaptive criterion that modifies the learning budget for each individual robot based on setting a target for the progress to be achieved during learning. This stopping criterion can lead to wide variance in learning times per robot evaluated. Research in other domains where variable evaluation time is also observed has suggested that asynchronous architectures are preferable in this situation, leading to improved objective performance and efficiency. 
We conduct a systematic comparison of synchronous and asynchronous architectures using the new learning stopping criterion in a joint optimisation task, showing that a judicious choice of target learning progress used in conjunction with an asynchronous framework provides considerably better results in terms of fitness and computational efficiency than a synchronous framework --- in the latter, the choice of target learning progress has no significant influence.}, notes = {GECCO-2024 EEAI A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(li:2024:GECCOcomp5, author = {Bi Li and Boyu Chen and Nankai Lin and Xueming Yan and Yue Xie and Zhifeng Hao}, title = {Generating Synthetic Tabular Data by Using Image Generative Adversarial Networks}, booktitle = {Embodied and Evolved Artificial Intelligence}, year = {2024}, editor = {Yue Xie and David Howard and Fumiya Iida and Josie Hughes}, pages = {1616--1622}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {generative adversarial networks (GANs), tabular GANs, image GANs, data science, EI}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664120}, size = {7 pages}, abstract = {As Generative Adversarial Networks (GANs) have achieved great success in generating fake images, some researchers are trying to design novel GANs to generate synthetic tabular data (referred to as tabular GANs). However, most GAN models designed for image data (referred to as image GANs) cannot be directly applied to tabular data since images and tabular data have different structures. Developing new tabular GANs is time-consuming. Therefore, this study aims to explore how to use existing image GANs to generate tabular data. To achieve this aim, we design the Image to Tabular GAN framework (I2TGANF). I2TGANF first transforms instances in tabular data into a format that can be processed by image GANs, and the data in this format are called Two-Dimensional Feature Matrices (TDFMs) in this paper. Then, off-the-shelf image GANs are trained on TDFMs and the trained generator is used to simulate synthetic TDFMs. Finally, these synthetic TDFMs are reconstructed into synthetic tabular data by I2TGANF. Our experiments show that, by using I2TGANF, existing image GANs can be applied to generate tabular data.
The results provide a novel insight for further related research: existing image GANs have great potential to be transferred to simulating tabular data.}, notes = {GECCO-2024 EEAI A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(da-silva:2024:GECCOcomp, author = {Matheus Muller Pereira {Da Silva} and Jaqueline Silva Angelo and Isabella Alvim Guedes and Laurent Emmanuel Dardenne}, title = {A Generative Evolutionary {Many-Objective} Framework: A Case Study in Antimicrobial Agent Design}, booktitle = {3rd GECCO workshop on Enhancing Generative Machine Learning with Evolutionary Computation (EGML-EC) 2024}, year = {2024}, editor = {Jamal Toutouh and Una-May O'Reilly and Joao Correia and Penousal Machado and Erik Hemberg}, pages = {1623--1630}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {de novo drug design, many-objective optimization, evolutionary algorithms, variational autoencoder, latent variable optimization}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664159}, size = {8 pages}, abstract = {de novo drug design (dnDD) aims to generate novel molecules that meet several conflicting objectives, positioning it as a quintessential many-objective optimization problem (MaOP), where more than three objectives must be simultaneously optimized. This work introduces a generative chemistry framework that combines a pre-trained variational autoencoder (VAE) generative model with multi- and many-objective evolutionary algorithms (EAs) to optimize multiple objectives in dnDD. Our method combines the generative power of the VAE with the optimization capabilities of multi- and many-objective EAs. This allows us to navigate the generative model's latent space effectively and optimize five drug-design objectives simultaneously. These objectives include molecular similarity to a known inhibitor of an antibacterial therapeutic target and dissimilarity to a compound with known off-target effects. Among the tested algorithms, NSGA-III demonstrated superior performance in generating novel molecules adhering to the desired drug properties. Finally, we explore the implications of dealing with many objectives in dnDD, a relatively underexplored area in the scientific literature, and suggest future avenues for research, including adopting more complex and numerous objectives and constraints.
The source code used in this work is available at https://github.com/gmmsb-lncc/generative-optim.}, notes = {GECCO-2024 EGML-EC A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(mignaco:2024:GECCOcomp, author = {Jimena Mignaco and Gonzalo Rey and Jairo Correa and Sergio Nesmachnow and Jamal Toutouh}, title = {Empirical comparison of evolutionary approaches for searching the latent space of Generative Adversarial Networks for the human face generation problem}, booktitle = {3rd GECCO workshop on Enhancing Generative Machine Learning with Evolutionary Computation (EGML-EC) 2024}, year = {2024}, editor = {Jamal Toutouh and Una-May O'Reilly and Joao Correia and Penousal Machado and Erik Hemberg}, pages = {1631--1639}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {generative adversarial networks, evolutionary latent space exploration, human face image generation}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664147}, size = {9 pages}, abstract = {This article presents an empirical comparison of evolutionary search methods for exploring the latent space of Generative Adversarial Networks for the human face generation problem. Single- and multiobjective evolutionary algorithms are evaluated for the problem of optimizing two metrics that evaluate the similarity of generated images to a target one and the similarity to a target race attribute. The studied evolutionary algorithms are integrated into a software pipeline that also includes StyleGAN3 for human face image generation and DeepFace as the face recognition model applied to evaluate the similarity of generated images to the targets. The evolutionary algorithms explore the real-coded latent space of StyleGAN3, applying weighted sum and explicit Pareto dominance approaches. Most of the generated images are able to deceive the face recognition model in DeepFace, and numerical results demonstrate the superiority of the multiobjective approach regarding quality and diversity metrics. The obtained results have practical applications in improving the robustness of face recognition systems.}, notes = {GECCO-2024 EGML-EC A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(berger:2024:GECCOcomp, author = {Bernhard Johannes Berger and Christina Plump and Lauren Paul and Rolf Drechsler}, title = {{EvoAl} - Codeless {Domain-Optimisation}}, booktitle = {Open Source Software for Evolutionary Computation}, year = {2024}, editor = {Stefan Wagner and Michael Affenzeller}, pages = {1640--1648}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664154}, size = {9 pages}, abstract = {Applying optimisation techniques such as evolutionary computation to real-world tasks often requires significant adaptation. However, specific application domains do not typically demand major changes to existing optimisation methods.
The decisive aspect is the inclusion of domain knowledge and configuration of established techniques to suit the problem. Separating the optimisation technique from the domain knowledge offers several advantages: First, it allows updating domain knowledge without necessitating reimplementation. Second, it improves identification and comparison of the optimisation methods employed. We present EvoAl, an open-source data-science research tool suite that focuses on optimisation research for real-world problems. EvoAl implements the separation of domain-knowledge and detaches implementation from configuration, facilitating optimisation with little programming effort, allowing direct comparability with other approaches (using EvoAl), and ensuring reproducibility. EvoAl also includes options for surrogate models, data models for complex search spaces, data validation, and benchmarking options for optimisation researchers.}, notes = {GECCO-2024 EvoOSS A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(burlacu:2024:GECCOcomp, author = {Bogdan Burlacu}, title = {Backend-agnostic Tree Evaluation for Genetic Programming}, booktitle = {Open Source Software for Evolutionary Computation}, year = {2024}, editor = {Stefan Wagner and Michael Affenzeller}, pages = {1649--1657}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, energy efficiency, symbolic regression}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664161}, size = {9 pages}, abstract = {The explicit vectorization of the mathematical operations required for fitness calculation can dramatically increase the efficiency of tree-based genetic programming for symbolic regression. In this paper, we introduce a modern software design for the seamless integration of vectorized math libraries with tree evaluation, and we benchmark each library in terms of runtime, solution quality and energy efficiency. The latter, in particular, is an aspect of increasing concern given the growing carbon footprint of AI. With this in mind, we introduce metrics for measuring the energy usage and power draw of the evolutionary algorithm. 
Our results show that an optimized math backend can decrease energy usage by as much as 35\% (with a proportional decrease in runtime) without any negative effects in the quality of solutions.}, notes = {GECCO-2024 EvoOSS A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(ceschia:2024:GECCOcomp, author = {Sara Ceschia and Francesca {Da Ros} and Luca {Di Gaspero} and Andrea Schaerf}, title = {{EasyLocal++} a 25-year Perspective on Local Search Frameworks: The Evolution of a Tool for the Design of Local Search Algorithm}, booktitle = {Open Source Software for Evolutionary Computation}, year = {2024}, editor = {Stefan Wagner and Michael Affenzeller}, pages = {1658--1667}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {software tools, algorithm engineering, local search, metaheuristics}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664140}, size = {10 pages}, abstract = {EasyLocal++ is a white-box C++ framework for designing local search algorithms. Over the years, it has been successfully used across various domains, such as timetabling, rostering, scheduling, and logistics, and has produced state-of-the-art results in benchmark datasets and competitions. Beyond research, EasyLocal++ has found practical use in real-world and industrial settings, demonstrating the flexibility and adaptability of the framework for different applications. In this paper, we position EasyLocal++ within the existing literature by comparing its capabilities with those of available alternative/similar tools. We then trace its history from its initial design 25 years ago to the current version. Furthermore, we describe its architecture, highlighting its design principles and functionalities. We also discuss the features developed to simplify the design of local search methods and enhance their performance. Lastly, we explore potential future perspectives and developments.}, notes = {GECCO-2024 EvoOSS A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(pinchuk:2024:GECCOcomp, author = {Maiia Pinchuk and Grigorii Kirgizov and Lyubov Yamshchikova and Nikolay Nikitin and Irina Deeva and Karine Shakhkyan and Ivan Borisov and Kirill Zharkov and Anna Kalyuzhnaya}, title = {{GOLEM:} Flexible Evolutionary Design of Graph Representations of Physical and Digital Objects}, booktitle = {Open Source Software for Evolutionary Computation}, year = {2024}, editor = {Stefan Wagner and Michael Affenzeller}, pages = {1668--1675}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary design, graph search, open source, robotics, drug design}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664141}, size = {8 pages}, abstract = {We introduce GOLEM --- an open-source optimization framework for automated design of graph-based structures in various scientific domains. It solves the problem of finding optimal topology and parameters of graphs using evolutionary algorithms, and does it in a modular domain-agnostic way. 
The paper describes the framework and its flexible approach to domain adaptation. Experimental studies provide several examples of GOLEM application in different fields: Bayesian networks, drug design, and robotics.}, notes = {GECCO-2024 EvoOSS A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(bellanger:2024:GECCOcomp, author = {Thibaut Bellanger and Matthieu {Le Berre} and Manuel Clergue and Jin-Kao Hao}, title = {Directed Acyclic Program Graph Applied to Supervised Classification}, booktitle = {Graph-based Genetic Programming}, year = {2024}, editor = {Dennis G. Wilson and Giorgia Nadizar and Giovanni Squillero and Alberto Tonda}, pages = {1676--1680}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, evolutionary algorithm, local search, supervised classification}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664115}, size = {5 pages}, abstract = {In the realm of Machine Learning, the pursuit of simpler yet effective models has led to increased interest in decision trees due to their interpretability and efficiency. However, their inherent simplicity often limits their ability to handle intricate patterns in data. This paper introduces a novel approach termed Directed Acyclic Graphs of Programs, inspired by evolutionary strategies, to address this challenge. By iteratively constructing program graphs from binary decision makers, our method offers a balance of simplicity and performance for classification tasks. Notably, we emphasize the preservation of model interpretability and expressiveness, avoiding the use of ensemble techniques like voting. Experimental evaluations demonstrate the superiority of our approach over existing methods in terms of both effectiveness and interpretability.}, notes = {GECCO-2024 GGP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(de-la-torre:2024:GECCOcomp2, author = {Camilo {De La Torre} and Sylvain Cussat-Blanc and Dennis Wilson and Yuri Lavinas}, title = {On Search Trajectory Networks for Graph Genetic Programming}, booktitle = {Graph-based Genetic Programming}, year = {2024}, editor = {Dennis G. Wilson and Giorgia Nadizar and Giovanni Squillero and Alberto Tonda}, pages = {1681--1685}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, cartesian genetic programming, evolutionary computation, symbolic regression}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664169}, size = {5 pages}, abstract = {Cartesian Genetic Programming (CGP) allows for the optimization of interpretable function representations. However, comprehending the vast and combinatorially complex search space inherent to CGP remains challenging, particularly because multiple genotypes may correspond to identical functions. This paper studies the application of Search Trajectory Networks (STNs) to understand the search dynamics of CGP, specifically for symbolic regression tasks. 
Using STNs, we analyze the behavior of evolutionary search processes and uncover distinct phenomena, such as the presence of "portal" minima---critical junctures that facilitate sudden, beneficial shifts in the search trajectory, akin to findings in linear genetic programming. Our findings illustrate that while genetic interpretations are complex and often ambiguous, a functional analysis using STNs offers clear and actionable insights into the CGP search.}, notes = {GECCO-2024 GGP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(murphy:2024:GECCOcomp2, author = {Jared Murphy and Travis Desell}, title = {Minimizing the {EXA-GP} {Graph-Based} Genetic Programming Algorithm for Interpretable Time Series Forecasting}, booktitle = {Graph-based Genetic Programming}, year = {2024}, editor = {Dennis G. Wilson and Giorgia Nadizar and Giovanni Squillero and Alberto Tonda}, pages = {1686--1690}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, time series forecasting, graph-based genetic programming, neuroevolution, recurrent neural networks}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664173}, size = {5 pages}, abstract = {This work provides a modification to the Evolutionary eXploration of Augmenting Memory Models (EXA-GP) graph-based genetic programming (GGP) algorithm, enabling it to produce time series forecasting (TSF) equations that are vastly more simple and interpretable than the original implementation without heavily compromising on predictive ability. This is accomplished by eliminating a majority of all the trainable constants and initializing the algorithm with a seed computational graph in the form of using a parameter's value at time t as the forecast for the parameter's value at time t + 1. This minimal version of EXA-GP (EXA-GP-MIN) is compared to EXA-GP and EXAMM, a full blown neuroevolution algorithm for evolving recurrent neural networks for TSF, on a suite of six real world benchmark problems, with MIN-EXA-GP showing the best forecasting ability on four of the six benchmarks with significantly more interpretable genetic programs.}, notes = {GECCO-2024 GGP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(squillero:2024:GECCOcomp, author = {Giovanni Squillero and Alberto Tonda and Dimitri Masetta and Marco Sacchet}, title = {Byron: A Fuzzer for Turing-complete Test Programs}, booktitle = {Graph-based Genetic Programming}, year = {2024}, editor = {Dennis G. Wilson and Giorgia Nadizar and Giovanni Squillero and Alberto Tonda}, pages = {1691--1694}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, graph-based GP, turing-complete GP, fuzzing, test-program, assembly, real-world application}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664136}, size = {4 pages}, abstract = {This paper describes Byron, an evolutionary fuzzer of assembly-language programs for the test and verification of programmable devices. 
Candidate solutions are internally encoded as typed, directed multigraphs, that is, graphs where multiple edges can connect the same pair of vertexes, with an added layer that defines the type of information vertexes can hold, and constrains the possible kinds of edges. Multiple genetic operators and a self-adaptation mechanism make the tool ready to tackle industrial problems.}, notes = {GECCO-2024 GGP A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(brownlee:2024:GECCOcomp2, author = {Alexander Brownlee and Sarah Thomson and Rachael Oladapo}, title = {A {Bi-Level} Approach to Vehicle Fleet Reduction: Successful Case Study in Community Healthcare}, booktitle = {9th Workshop on Industrial Applications of Metaheuristics (IAM 2024)}, year = {2024}, editor = {Silvino Fernandez Alzueta and Thomas Stuetzle}, pages = {1695--1701}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {optimal job scheduling, evolutionary computation, bilevel optimization}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664137}, size = {7 pages}, abstract = {We report on a case study application of metaheuristics with Argyll and Bute Health and Social Care Partnership in the West of Scotland. The Partnership maintains a fleet of pool vehicles that are available to service visits of staff to locations across a largely rural area. Maintaining such a fleet is important but costly: we show how the allocation of fleet vehicles can be formulated as a bilevel optimisation problem. At the upper level, vehicles are allocated to 'base' locations such as hospitals. At the lower level, vehicles are allocated to specific jobs. We explore local-search approaches to solving this problem. We show that some blurring of the distinction between upper and lower levels can be helpful for this problem. We also demonstrate, for our case study, a 7.1\% reduction in the vehicle fleet while still being able to meet all demand.}, notes = {GECCO-2024 IAM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(chitty:2024:GECCOcomp2, author = {Darren Chitty}, title = {Improving the Efficiency Of Genetic Programming for Classification Tasks Using a Phased Approach}, booktitle = {9th Workshop on Industrial Applications of Metaheuristics (IAM 2024)}, year = {2024}, editor = {Silvino Fernandez Alzueta and Thomas Stuetzle}, pages = {1702--1705}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, high performance computing, efficiency}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664184}, size = {4 pages}, abstract = {Genetic Programming (GP) uses Darwinian evolution to generate algorithms for tasks such as classification and symbolic regression. However, a drawback is that the interpreter used to evaluate candidate programs adds significant computational cost. Hence, many studies have sought to improve the speed of GP primarily via parallelism. However, efficiency can also offer considerable performance gains.
GP has recently been applied to combinatorial optimisation using a phased approach (Phased-GP) whereby programs are evolved piecemeal avoiding reinterpretation of sub-programs. This method was found to be highly effective and efficient compared to standard GP. This paper investigates if a similar effect is observed when using phased GP to incrementally build classifiers. Tested upon known real-world classification problems, an efficiency saving of up to 98\% can be achieved with a speedup of 70 fold and no significant loss of classification accuracy. Moreover, the method can be easily used within any existing high performance parallel GP models.}, notes = {GECCO-2024 IAM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(da-ros:2024:GECCOcomp2, author = {Francesca {Da Ros} and Luca {Di Gaspero} and Marie-Louise Lackner and Nysret Musliu}, title = {Reducing Energy Consumption in Electronic Component Manufacturing through Large Neighborhood Search}, booktitle = {9th Workshop on Industrial Applications of Metaheuristics (IAM 2024)}, year = {2024}, editor = {Silvino Fernandez Alzueta and Thomas Stuetzle}, pages = {1706--1714}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {oven scheduling problem, batch scheduling problem, large neighborhood search, decision support system}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664132}, size = {9 pages}, abstract = {Amidst the ongoing climate crisis, there is a pressing need to reduce energy consumption - especially in industrial settings, as recognized by the United Nations Sustainability Goals (in particular, 9 and 12). To mitigate energy usage in production, strategically grouping compatible jobs and processing them together in batches, as captured by batch scheduling problems, is often beneficial. The Oven Scheduling Problem (OSP), is an NP-hard real-world parallel batch scheduling problem that arises in the electronic component industry. The goal of the OSP is to schedule jobs on ovens while minimizing total oven operating time, job tardiness, and setup costs. To reduce oven runtime, compatible jobs can be processed simultaneously in batches. The schedule must respect oven eligibility and availability, job release dates, setup times between batches, and oven capacity constraints.We propose and fine-tune a Large Neighborhood Search (LNS) algorithm that uses three different destroy operators and a repair operator based on an exact method (a Constraint Programming model). Our findings demonstrate that LNS significantly enhances the solution quality for many large instances compared to existing exact methods from the literature. 
Furthermore, we develop a user-friendly dashboard to facilitate decision-makers in the navigation of the optimization tool.}, notes = {GECCO-2024 IAM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(de-winter:2024:GECCOcomp2, author = {Roy {de Winter} and Thomas Baeck and Niki {Van Stein}}, title = {Modular Optimization Framework for Mixed Expensive and Inexpensive {Real-World} Problems}, booktitle = {9th Workshop on Industrial Applications of Metaheuristics (IAM 2024)}, year = {2024}, editor = {Silvino Fernandez Alzueta and Thomas Stuetzle}, pages = {1715--1723}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {optimization, computationally expensive, constraint optimization}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664133}, size = {9 pages}, abstract = {A modular optimization framework is proposed that can deal with different problem characteristics encountered in real-world design problems. The common denominator of problems considered in this work is the computationally demanding objective function used to evaluate the designs, while some of the constraint functions can be inexpensive. The framework uses radial basis functions as surrogates for the computationally expensive objectives while inexpensive functions can directly be used for searching promising solutions on the surrogates. The framework is adjustable for the number of continuous design parameters, the number of constraints, the number of solutions that can be evaluated in parallel, and different strategies can be selected on how to deal with the computationally inexpensive functions. We propose guidelines on how to set up the modular framework depending on the problem characteristics one has to deal with. To verify the working of the optimization framework, two different real-world ship design optimization problems have been optimized with different characteristics such as the number of constraints and how expensive they are. 
The results show that it is beneficial to directly exploit the inexpensive functions when possible and that parameterization of the problem is the most important step of the process.}, notes = {GECCO-2024 IAM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(filatova:2024:GECCOcomp, author = {Anastasiia Filatova and Ivan Perov and Egor Timoschak and Stanislav Batalenkov and Mikhail Kovalchuk and Denis Nasonov}, title = {Advancing Project Management: Integrating Material Delivery in {Multi-Contractor} {Multi-Mode} {RCPSP}}, booktitle = {9th Workshop on Industrial Applications of Metaheuristics (IAM 2024)}, year = {2024}, editor = {Silvino Fernandez Alzueta and Thomas Stuetzle}, pages = {1724--1727}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {RCPSP, material delivery, genetic algorithm, transportation network}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664187}, size = {4 pages}, abstract = {In the realm of project management, optimizing schedules under resource constraints presents a substantial challenge. This paper presents a novel approach to project scheduling that integrates the challenges of resource constraints and material delivery within multi-contractor, multi-mode settings. Our model, designed for complex industrial projects, aims to minimize project makespan and peak resource usage across a network of construction sites and depots. We utilize Genetic Algorithms to develop algorithms that produce Paretooptimal solutions, effectively balancing time efficiency and resource consumption. The model stands out by incorporating a transportation network with road characteristics and capacity constraints at each site and warehouse, adding realism and complexity to the scheduling task. Initial results show up to a 12\% improvement in scheduling efficiency in real-world capital construction projects.}, notes = {GECCO-2024 IAM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(kalia:2024:GECCOcomp, author = {Subodh Kalia and Nikhil Padhye and Chilukuri Mohan}, title = {Structural Optimization with Isogeometric Representation using an Evolutionary Approach}, booktitle = {9th Workshop on Industrial Applications of Metaheuristics (IAM 2024)}, year = {2024}, editor = {Silvino Fernandez Alzueta and Thomas Stuetzle}, pages = {1728--1731}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {parametric modeling, isogeometric analysis, optimization}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664185}, size = {4 pages}, abstract = {The engineering design of optimal geometric structures is a challenging task, successfully addressed here by evolutionary optimization in conjunction with Isogeometric Analysis. The structures are represented using three-dimensional volumetric Non-Uniform Rational B-Splines, and their geometric features are chosen as the decision variables for optimization under linear constraints. 
A clustering and nearest neighbor search-based surrogate model is adopted to reduce computational expense.}, notes = {GECCO-2024 IAM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(kheiri:2024:GECCOcomp, author = {Ahmed Kheiri and Yaroslav Pylyavskyy and Peter Jacko}, title = {Efficient Scheduling of {GECCO} Conferences using Hyper-heuristic Algorithms}, booktitle = {9th Workshop on Industrial Applications of Metaheuristics (IAM 2024)}, year = {2024}, editor = {Silvino Fernandez Alzueta and Thomas Stuetzle}, pages = {1732--1737}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {conference scheduling problem, optimisation, hyper-heuristic}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664186}, size = {6 pages}, abstract = {We propose the development of a conference scheduler tailored specifically for the Genetic and Evolutionary Computation Conference (GECCO). Our proposed flexible approach allows GECCO organisers to optimise conference schedules according to their specific needs and available resources. Using hyper-heuristic methods, our scheduler generates optimised solutions for in-person and hybrid GECCO conferences. We validate our method using data from GECCO2019 and demonstrate its effectiveness by successfully creating schedules for GECCO conferences from 2020 onwards.}, notes = {GECCO-2024 IAM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(lezama:2024:GECCOcomp, author = {Fernando Lezama and Jose Almeida and Joao Soares and Zita Vale and Wenlei Bai and Kwang {Y. Lee} and Wen Zhang}, title = {Assessing {PV} Integration with Evolutionary Algorithms: Insights from the 2024 Competition on Evolutionary Computation in the Energy Domain}, booktitle = {9th Workshop on Industrial Applications of Metaheuristics (IAM 2024)}, year = {2024}, editor = {Silvino Fernandez Alzueta and Thomas Stuetzle}, pages = {1738--1744}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary computation, optimization, PV system allocation, smart grid}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664135}, size = {7 pages}, abstract = {In the field of energy systems, the "WCCI(CEC)/GECCO 2024 Competition Evolutionary Computation in the Energy Domain: Optimal PV System Allocation" serves as a platform for evaluating and comparing various metaheuristic algorithms tailored to address complex energy challenges. The current competition ranking metric, focused on mean fitness values, provides a basis for assessing algorithm performance. This work assesses several algorithms in a new testbed of the competition. The primary goal is to compare the performance of these algorithms based on their ability to address the challenges posed by the integration of PV systems in distribution networks. 
These results offer an early glimpse into how these metaheuristics perform, paving the way for further exploration of their capabilities.}, notes = {GECCO-2024 IAM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(nourmohammadzadeh:2024:GECCOcomp, author = {Abtin Nourmohammadzadeh and Stefan Voss}, title = {A Matheuristic Algorithm to Optimise {Multi-Objective} Production Scheduling in a Manufacturing Plant with Fluctuating Raw Material Supplies}, booktitle = {9th Workshop on Industrial Applications of Metaheuristics (IAM 2024)}, year = {2024}, editor = {Silvino Fernandez Alzueta and Thomas Stuetzle}, pages = {1745--1753}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {production scheduling, uncertain material supplies, right-hand-side (RHS) uncertainty, multi-objective, robustness, dynamic parameter adjustment}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664177}, size = {9 pages}, abstract = {Production scheduling under uncertain material supplies, a common phenomenon in practical settings, presents considerable challenges, particularly within the industrial sector. The current study defines this problem as a bi-objective right-hand-side (RHS) stochastic program aiming to minimise the overall expenses associated with production and inventory as one objective, and to reduce the cumulative adverse effects resulting from product completion delays as the other objective. A mixed-integer linear model is constructed for the problem and a matheuristic algorithm, which combines variable neighbourhood search (VNS), fixed set search (FSS), and mathematical optimisation of sub-problems, is proposed to address the intricate nature of the problem. Multiple scenarios are analysed concerning the available material supplies, and robust non-dominated solutions are sought. The efficiency of the proposed algorithm is evaluated against some alternative techniques. For instance, the epsilon-constraint method is examined separately, and a hybrid metaheuristic strategy is explored, combining the non-dominated sorting genetic algorithm (NSGA-II) with multi-objective simulated annealing (MOSA). Key performance indicators including hypervolume, quality metric, and processing time are used for evaluation. 
Moreover, a dynamic parameter adjustment approach is implemented, demonstrating a significant level of effectiveness.}, notes = {GECCO-2024 IAM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(yoshikawa:2024:GECCOcomp, author = {Takushi Yoshikawa and Ryoji Tanabe}, title = {On Constructing Algorithm Portfolios in Algorithm Selection for Computationally Expensive Black-box Optimization in the Fixed-budget Setting}, booktitle = {9th Workshop on Industrial Applications of Metaheuristics (IAM 2024)}, year = {2024}, editor = {Silvino Fernandez Alzueta and Thomas Stuetzle}, pages = {1754--1761}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {feature-based algorithm selection, computationally expensive blackbox optimization, algorithm portfolios}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664127}, size = {8 pages}, abstract = {Feature-based offline algorithm selection has shown its effectiveness in a wide range of optimization problems, including the blackbox optimization problem. An algorithm selection system selects the most promising optimizer from an algorithm portfolio, which is a set of pre-defined optimizers. Thus, algorithm selection requires a well-constructed algorithm portfolio consisting of efficient optimizers complementary to each other. Although construction methods for the fixed-target setting have been well studied, those for the fixed-budget setting have received less attention. Here, the fixed-budget setting is generally used for computationally expensive optimization, where a budget of function evaluations is small. In this context, first, this paper points out some undesirable properties of experimental setups in previous studies. Then, this paper argues the importance of considering the number of function evaluations used in the sampling phase when constructing algorithm portfolios, whereas the previous studies ignored that. The results show that algorithm portfolios constructed by our approach perform significantly better than those by the previous approach.}, notes = {GECCO-2024 IAM A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(wang:2024:GECCOcomp7, author = {Yanan Wang and Yan Pei and Zerui Ma and Jianqiang Li}, title = {A {User-Guided} Generation Framework for Personalized Music Synthesis Using Interactive Evolutionary Computation}, booktitle = {Interactive Methods at GECCO}, year = {2024}, editor = {Nguyen Dang and Mohammad Nabi Omidvar}, pages = {1762--1769}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {human-AI interaction, co-creativity, music synthesis, interactive evolutionary computation, generative adversarial network, human-in-the-loop}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664109}, size = {8 pages}, abstract = {The development of generative artificial intelligence (AI) has demonstrated notable advancements in the domain of music synthesis. However, a perceived lack of creativity in the generated content has drawn significant attention from the public. 
To address this, this paper introduces a novel approach to personalized music synthesis incorporating human-in-the-loop generation. This method leverages the dual strengths of interactive evolutionary computation, known for its ability to capture user preferences, and a generative adversarial network, renowned for its capacity to autonomously produce high-quality music. The primary objective of this integration is to augment the credibility and diversity of generative AI in music synthesis, fostering computational artistic creativity in humans. Furthermore, a user-friendly interactive music player has been designed to facilitate users in the music synthesis process. The proposed method exemplifies a paradigm wherein users manipulate latent space through human-machine interaction, underscoring the pivotal role of humans in the synthesis of diverse and creative music.}, notes = {GECCO-2024 iGECCO A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(coombe:2024:GECCOcomp, author = {Cameron Coombe and David Howard and Will Browne}, title = {Learning Classifier Systems as a Solver for the Abstraction and Reasoning Corpus}, booktitle = {27th International Workshop on Evolutionary Rule-based Machine Learning}, year = {2024}, editor = {Abubakar Siddique and Michael Heider and Hiroki Shiraishi}, pages = {1770--1778}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {learning classifier systems, abstraction, abstraction and reasoning corpus}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664157}, size = {9 pages}, abstract = {The abstraction and reasoning corpus (ARC) is a challenging AI benchmark as it requires models to learn unseen relationships from a few data points. Each puzzle only contains 2--5 training examples, which makes it hard for models that require training on large datasets. Models which do not require training on large datasets, like learning classifier systems (LCSs), have the potential to solve this kind of complex, low-data problem due to their flexible representation, niche-based learning, and ability to generalise. Whilst some learners that can operate on low data have been applied to ARC, LCS-based architectures remain entirely unexplored. We propose a simple LCS architecture employing a windowing approach. This architecture solves 19 of 400 test grids (4.75\%) in the ARC training set, which shows promise by outperforming other na\"{\i}ve approaches. Additionally, the system uses minimal prior knowledge to achieve this result, bringing it closer to the original vision of an ARC solver, which is to only rely on a core set of concepts.
We provide directions on how this basic model could be expanded upon to include more complex structures making use of LCSs' ability to integrate many diverse kinds of representations.}, notes = {GECCO-2024 IWERL A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(paetzel:2024:GECCOcomp2, author = {David Paetzel and Richard Nordsieck and Joerg Haehner}, title = {A Closer Look at Length-niching Selection and Spatial Crossover in Variable-length Evolutionary Rule Set Learning}, booktitle = {27th International Workshop on Evolutionary Rule-based Machine Learning}, year = {2024}, editor = {Abubakar Siddique and Michael Heider and Hiroki Shiraishi}, pages = {1779--1787}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithm, variable-length representation, metaheuristic rule set learning, learning classifier systems}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664178}, size = {9 pages}, abstract = {We explore variable-length metaheuristics for optimizing sets of rules for regression tasks by extending an earlier short paper that performed a preliminary analysis of several variants of a single-objective Genetic Algorithm. We describe more in depth the algorithm and operator variants used and document design decisions as well as the rationale behind them. The earlier work identified crossover as being detrimental for solution compactness; we take a closer look by analysing convergence behaviour of the variants tested. We are able to conclude that using one of the investigated crossover operators trades prediction error outliers for more smaller errors at the expense of solution compactness. The positive effects of length-niching selection (holding off premature convergence to a certain solution length) are undetectable in fitness values in the settings considered. We further perform comparisons with already-known rule-based algorithms XCSF and CART Decision Trees and conclude that, even without parameter tuning, the best-performing of the variants of the GA outperforms XCSF on the tasks considered, comes close to being competitive with respect to test Mean Absolute Error and creates similarly compact solutions as the Decision Tree algorithm. 
The 54 learning tasks considered are synthetic and in the limit learnable by rule-based algorithms.}, notes = {GECCO-2024 IWERL A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(schoenberner:2024:GECCOcomp, author = {Connor Schoenberner and Sven Tomforde}, title = {{XCS:} Is Covering All You Need?}, booktitle = {27th International Workshop on Evolutionary Rule-based Machine Learning}, year = {2024}, editor = {Abubakar Siddique and Michael Heider and Hiroki Shiraishi}, pages = {1788--1796}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {LCS, XCS, reinforcement learning, neuro-symbolic AI, covering, neural prediction, deep learning}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664146}, size = {9 pages}, abstract = {The XCS Classifier System (XCS) can be considered as the most popular and most researched Learning Classifier System (LCS) and was originally intended to learn Reinforcement Learning (RL) problems. However, over time it became clear that the system suffers under issues limiting its performance in multi-step RL problems. This paper provides empirical insights on how the Genetic Algorithm (GA) and the covering mechanism of XCS contribute to the performance in multi-step RL environments, by experimenting with mazes and the Frozen Lake environment. It meets this objective by testing the hypothesis that XCS is able to learn these environments only using covering without activating the GA when using neural prediction or Recursive Linear Least Squares (RLS) prediction. The experiments affirm this hypothesis for neural prediction in several instances, while XCS with linear RLS prediction appears to require the GA. When using neural prediction, the GA of XCS produces considerably more rules than necessary for small performance gains at best or increased instability at worst.}, notes = {GECCO-2024 IWERL A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(siddique:2024:GECCOcomp2, author = {Abubakar Siddique and Michael Heider and Muhammad Iqbal and Hiroki Shiraishi}, title = {A Survey on Learning Classifier Systems from 2022 to 2024}, booktitle = {27th International Workshop on Evolutionary Rule-based Machine Learning}, year = {2024}, editor = {Abubakar Siddique and Michael Heider and Hiroki Shiraishi}, pages = {1797--1806}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {learning classifier systems, evolutionary computation, machine learning, explainable AI, modular learning, transfer learning}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664165}, size = {10 pages}, abstract = {Learning classifier systems (LCSs) are a state-of-the-art methodology for developing rule-based machine learning by applying discovery algorithms and learning components. LCSs have become proficient at linking environmental features to describe simple patterns in data. They have a natural ability to split a solution into niches. 
The decision-making process of an LCS-based system is interpretable, which is a step toward explainable AI. A broad range of LCS-based applications have been developed to solve real-world problems. The International Workshop on Learning Classifier Systems (IWLCS) is one of the pioneering and most successful workshops at GECCO. It serves as a beacon for the next generation of researchers, inspiring them to delve deep into evolutionary rule-based machine learning, with a particular focus on LCSs. This work follows the tradition of previous surveys at the workshop and provides an overview of the LCS-related publications from March 2022 to March 2024. Based on the nature of contributions, the publications selected for review are divided into the following five groups: (i) Theoretical and Architectural Enhancements, (ii) Explainability, (iii) Applications, (iv) Role of Metaheuristics in LCSs, and (v) Miscellaneous Contributions. This survey provides an easy entry point to the most recent progress and achievements in the field of LCSs.}, notes = {GECCO-2024 IWERL A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(surhonne:2024:GECCOcomp, author = {Anmol Surhonne and Manuel Wensauer and Florian Maurer and Thomas Wild and Andreas Herkersdorf}, title = {{XCS} with dynamic sized experience replay for memory constrained applications}, booktitle = {27th International Workshop on Evolutionary Rule-based Machine Learning}, year = {2024}, editor = {Abubakar Siddique and Michael Heider and Hiroki Shiraishi}, pages = {1807--1814}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {XCS, experience replay, memory constraint}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664148}, size = {8 pages}, abstract = {The eXtended Classifier System (XCS) is the most widely studied classifier system in the community. It is a class of interpretable AI which has shown strong capability to master various classification and regression tasks. It has also shown strong performance in certain multi-step environments in the reinforcement learning domain. XCS consists of a population of classifiers of size N which is decided at design time. The population size N is typically large to provide room for the learning and generalization mechanism of XCS. Experience replay (ER) is a popular technique in reinforcement learning which significantly improves the learning of the agents. ER uses a replay memory of fixed size which is defined at design time. Typically XCS has been trained on high-performance computers or servers which have near to no limitations on memory. XCS, when applied to embedded applications or IoT devices, is constrained by memory consumption. This memory constraint affects the population size and the size of the replay memory in ER.
In this work, we propose XCS with dynamic sized experience replay, where the replay memory is resized in inverse proportion to the number of macro-classifiers in the population to maximize performance within a memory constraint.}, notes = {GECCO-2024 IWERL A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(yasuda:2024:GECCOcomp, author = {Yusuke Yasuda and Kenichi Tamura and Keiichiro Yasuda}, title = {Analyzing Violation Landscapes Using Different Definitions of Constraint Violation}, booktitle = {Landscape-Aware Heuristic Search}, year = {2024}, editor = {Nadarajen Veerapen and Arnaud Liefooghe and Sebastien Verel and Gabriela Ochoa}, pages = {1815--1823}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {constrained optimization, constraint violation, violation landscape, landscape analysis}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664118}, size = {9 pages}, abstract = {Black-Box Optimization (BBO) deals with optimization scenarios where objective functions and constraint functions cannot be explicitly expressed. Metaheuristic algorithms are powerful search methods for BBO, yet their search performance depends on the function landscape. Landscape analysis contributes to characterizing BBO functions with unknown internal processes, aiding in the design and selection of suitable algorithms. In constrained optimization, the violation landscape is defined according to constraint violations. While constraint violations can be flexibly defined as long as they form the same feasible region, a comprehensive analysis of violation landscapes based on the definition of constraint violations has not been conducted. This paper characterizes violation landscapes using different definitions of constraint violation on constrained single-objective optimization benchmarks. The results of the analysis with various landscape measures demonstrate that the features of the violation landscape clearly change according to the definition of constraint violations.
This provides novel insights that contribute to the appropriate design of violation landscapes.}, notes = {GECCO-2024 LAHS A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(bin-murtaza:2024:GECCOcomp, author = {Sardar {Bin Murtaza} and Aidan Mccoy and Zhiyuan Ren and Aidan Murphy and Wolfgang Banzhaf}, title = {{LLM} Fault Localisation within Evolutionary Computation Based Automated Program Repair}, booktitle = {Large Language Models for and with Evolutionary Computation Workshop}, year = {2024}, editor = {Erik Hemberg and Roman Senkerik and Una-May O'Reilly and Michal Pluhacek and Tome Eftimov}, pages = {1824--1829}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic improvement, fault localisation, large language models, ANN}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664174}, size = {6 pages}, abstract = {Repairing bugs can be a daunting task for even a human experienced in debugging, so naturally, attempting to automatically repair programs with a computer system is quite challenging. The existing methods of automated program repair leave a lot of room for improvement. Fault localization, which aims to find lines of code that are potentially buggy, minimises the search space of an automated program repair system. Recent work has shown improvement in these fault localization methods, with the use of Large Language Models. Here, we propose a system where a LLM-based fault localization tool, which we call SemiAutoFL, is used within a fully automatic program repair program, ARJA-e. We show that utilising LLM-based fault localization with ARJA-e can significantly improve its performance on real world bugs. ARJA-e with SemiAutoFL can repair 10 bugs that ARJA-e was previously unable to so do. This finding adds to our understanding of how to improve fault localization and automated program repair, highlighting the potential for more efficient and accurate fault localisation methods being applied to automated program repair.}, notes = {GECCO-2024 LLMfwEC A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(custode:2024:GECCOcomp, author = {Leonardo Lucio Custode and Chiara Camilla {Migliore Rambaldi} and Marco Roveri and Giovanni Iacca}, title = {Comparing Large Language Models and Grammatical Evolution for Code Generation}, booktitle = {Large Language Models for and with Evolutionary Computation Workshop}, year = {2024}, editor = {Erik Hemberg and Roman Senkerik and Una-May O'Reilly and Michal Pluhacek and Tome Eftimov}, pages = {1830--1837}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, grammatical evolution, large language models, code generation, ANN}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664162}, size = {8 pages}, abstract = {Code generation is one of the most valuable applications of AI, as it allows for automated programming and "self-building" programs. 
Both Large Language Models (LLMs) and evolutionary methods, such as Genetic Programming (GP) and Grammatical Evolution (GE), are known to be capable of performing code generation with reasonable performance. However, to the best of our knowledge, little work has been done so far on a systematic comparison between the two approaches. Most importantly, the only studies that conducted such comparisons used benchmarks from the GP community, which, in our opinion, may have provided possibly GP-biased results. In this work, we perform a comparison of LLMs and evolutionary methods, in particular GE, using instead a well-known benchmark originating from the LLM community. Our results show that, in this scenario, LLMs can solve significantly more tasks than GE, indicating that GE struggles to match the performance of LLMs on code generation tasks that have different properties from those commonly used in the GP community.}, notes = {GECCO-2024 LLMfwEC A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(custode:2024:GECCOcomp2, author = {Leonardo Lucio Custode and Fabio Caraffini and Anil Yaman and Giovanni Iacca}, title = {An investigation on the use of Large Language Models for hyperparameter tuning in Evolutionary Algorithms}, booktitle = {Large Language Models for and with Evolutionary Computation Workshop}, year = {2024}, editor = {Erik Hemberg and Roman Senkerik and Una-May O'Reilly and Michal Pluhacek and Tome Eftimov}, pages = {1838--1845}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary algorithms, large language models, landscape analysis, parameter tuning, ANN}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664163}, size = {8 pages}, abstract = {Hyperparameter optimization is a crucial problem in Evolutionary Computation. In fact, the values of the hyperparameters directly impact the trajectory taken by the optimization process, and their choice requires extensive reasoning by human operators. Although a variety of self-adaptive Evolutionary Algorithms have been proposed in the literature, no definitive solution has been found. In this work, we perform a preliminary investigation to automate the reasoning process that leads to the choice of hyperparameter values. We employ two open-source Large Language Models (LLMs), namely Llama2-70b and Mixtral, to analyze the optimization logs online and provide novel real-time hyperparameter recommendations. We study our approach in the context of step-size adaptation for (1 + 1)-ES. 
The results suggest that LLMs can be an effective method for optimizing hyperparameters in Evolution Strategies, encouraging further research in this direction.}, notes = {GECCO-2024 LLMfwEC A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(guo:2024:GECCOcomp3, author = {Ping Guo and Fei Liu and Xi Lin and Qingchuan Zhao and Qingfu Zhang}, title = {{L-AutoDA:} Large Language Models for Automatically Evolving Decision-based Adversarial Attacks}, booktitle = {Large Language Models for and with Evolutionary Computation Workshop}, year = {2024}, editor = {Erik Hemberg and Roman Senkerik and Una-May O'Reilly and Michal Pluhacek and Tome Eftimov}, pages = {1846--1854}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {large language models, adversarial attacks, automated algorithm design, evolutionary algorithms, ANN}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664121}, size = {9 pages}, abstract = {In the rapidly evolving field of machine learning, adversarial attacks pose a significant threat to the robustness and security of models. Amongst these, decision-based attacks are particularly insidious due to their nature of requiring only the model's decision output, which makes them notably challenging to counteract. This paper presents L-AutoDA (Large Language Model-based Automated Decision-based Adversarial Attacks), an innovative methodology that harnesses the generative capabilities of large language models (LLMs) to streamline the creation of such attacks. L-AutoDA employs an evolutionary strategy, where iterative interactions with LLMs lead to the autonomous generation of potent attack algorithms, thereby reducing human intervention. The performance of L-AutoDA was evaluated on the CIFAR-10 dataset, where it demonstrated substantial superiority over existing baseline methods in terms of success rate and computational efficiency. 
Ultimately, our results highlight the formidable utility of language models in crafting adversarial attacks and reveal promising directions for constructing more resilient AI systems.}, notes = {GECCO-2024 LLMfwEC A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(pluhacek:2024:GECCOcomp2, author = {Michal Pluhacek and Jozef Kovac and Peter Janku and Tomas Kadavy and Roman Senkerik and Adam Viktorin}, title = {A Critical Examination of Large Language Model Capabilities in Iteratively Refining Differential Evolution Algorithm}, booktitle = {Large Language Models for and with Evolutionary Computation Workshop}, year = {2024}, editor = {Erik Hemberg and Roman Senkerik and Una-May O'Reilly and Michal Pluhacek and Tome Eftimov}, pages = {1855--1862}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary computation, large language models, metaheuristic optimization, automatic algorithm design, GPT, ANN}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664179}, size = {8 pages}, abstract = {In this study, we investigate the applicability, challenges, and effectiveness of the advanced large language model GPT 4 Turbo in enhancing the selected metaheuristic algorithm, which is Differential Evolution. Our research primarily examines whether iterative, repetitive prompting could lead to progressive improvements in algorithm performance. We also explore the potential of developing enhanced algorithms through this process that markedly surpass the established baseline in terms of performance. In addition, the impact of the model's temperature parameter on these improvements is evaluated. As part of our diverse testing approach, we conduct an experiment where the best-performing algorithm from the initial phase is used as a new baseline. This step is to determine if further refinement via GPT 4 Turbo can achieve even better algorithmic efficiency. 
Finally, we have performed the benchmarking comparison of selected enhanced variants against the top three algorithms from the CEC 2022 competition.}, notes = {GECCO-2024 LLMfwEC A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(altarabichi:2024:GECCOcomp2, author = {Mohammed Ghaith Altarabichi and Abdallah Alabdallah and Sepideh Pashami and Thorsteinn Roegnvaldsson and Slawomir Nowaczyk and Mattias Ohlsson}, title = {Improving Concordance Index in Regression-based Survival Analysis: Evolutionary Discovery of Loss Function for Neural Networks}, booktitle = {Neuroevolution at work}, year = {2024}, editor = {Ivanoe {De Falco} and Antonio Della Cioppa and Edgar Galvan and Umberto Scafuri and Mengjie Zhang}, pages = {1863--1869}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {neuroevolution, evolutionary meta-learning, loss function, neural networks, survival analysis, concordance index, genetic algorithms}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664129}, size = {7 pages}, abstract = {In this work, we use an Evolutionary Algorithm (EA) to discover a novel Neural Network (NN) regression-based survival loss function with the aim of improving the C-index performance. Our contribution is threefold; firstly, we propose an evolutionary meta-learning algorithm SAGAloss for optimizing a neural-network regression-based loss function that maximizes the C-index; our algorithm consistently discovers specialized loss functions that outperform MSCE. Secondly, based on our analysis of the evolutionary search results, we highlight a non-intuitive insight that signifies the importance of the non-zero gradient for the censored cases part of the loss function, a property that is shown to be useful in improving concordance. Finally, based on this insight, we propose MSCESp, a novel survival regression loss function that can be used off-the-shelf and generally performs better than the Mean Squared Error for censored cases. We performed extensive experiments on 19 benchmark datasets to validate our findings.}, notes = {GECCO-2024 NEWK A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(barba:2024:GECCOcomp, author = {Edoardo Barba and Anil Yaman and Giovanni Iacca}, title = {The Effect of Training Schedules on Morphological Robustness and Generalization}, booktitle = {Neuroevolution at work}, year = {2024}, editor = {Ivanoe {De Falco} and Antonio Della Cioppa and Edgar Galvan and Umberto Scafuri and Mengjie Zhang}, pages = {1870--1878}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {neuroevolution, generalization, robustness, variability}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664138}, size = {9 pages}, abstract = {Robustness and generalizability are the key properties of artificial neural network (ANN)-based controllers for maintaining a reliable performance in case of changes. 
It is demonstrated that exposing the ANNs to variations during training processes can improve their robustness and generalization capabilities. However, the way in which this variation is introduced can have a significant impact. In this paper, we define various training schedules to specify how these variations are introduced during an evolutionary learning process. In particular, we focus on morphological robustness and generalizability concerned with finding an ANN-based controller that can provide sufficient performance on a range of physical variations. Then, we perform an extensive analysis of the effect of these training schedules on morphological generalization. Furthermore, we formalize the process of training sample selection (i.e., morphological variations) to improve generalization as a reinforcement learning problem. Overall, our results provide deeper insights into the role of variability and the ways of enhancing the generalization property of evolved ANN-based controllers.}, notes = {GECCO-2024 NEWK A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(claret:2024:GECCOcomp, author = {Romain Claret and Michael O'Neill and Paul Cotofrei and Kilian Stoffel}, title = {Investigating Hyperparameter Optimization and Transferability for {ES-HyperNEAT:} A {TPE} Approach}, booktitle = {Neuroevolution at work}, year = {2024}, editor = {Ivanoe {De Falco} and Antonio Della Cioppa and Edgar Galvan and Umberto Scafuri and Mengjie Zhang}, pages = {1879--1887}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {neuroevolution, ES-HyperNEAT, hyperparameter optimization, tree-structured parzen estimator, MNIST, fashion-MNIST, logic operations, transfer learning}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664144}, size = {9 pages}, abstract = {Neuroevolution of Augmenting Topologies (NEAT) and its advanced version, Evolvable-Substrate HyperNEAT (ES-HyperNEAT), have shown great potential in developing neural networks. However, their effectiveness heavily depends on the selection of hyperparameters. This study investigates the optimization of ES-HyperNEAT hyperparameters using the Tree-structured Parzen Estimator (TPE) on the MNIST classification task, exploring a search space of over 3 billion potential combinations. TPE effectively navigates this vast space, significantly outperforming random search in terms of mean, median, and best accuracy. During the validation process, the best hyperparameter configuration found by TPE achieves an accuracy of 29.00\% on MNIST, surpassing previous studies while using a smaller population size and fewer generations. The transferability of the optimized hyperparameters is explored in logic operations and Fashion-MNIST tasks, revealing successful transfer to the more complex Fashion-MNIST problem but limited to simpler logic operations. 
This study emphasizes a method to unlock the full potential of neuroevolutionary algorithms and provides insights into the hyperparameters' transferability across tasks of varying complexity.}, notes = {GECCO-2024 NEWK A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(coleti:2024:GECCOcomp, author = {Mark Coleti and Chathika Gunaratne and Steven Young and Swetha Varadarajan and Robert Patton and Tom Potok}, title = {Efficacy of using a dynamic length representation vs. a fixed-length for neuroarchitecture search}, booktitle = {Neuroevolution at work}, year = {2024}, editor = {Ivanoe {De Falco} and Antonio Della Cioppa and Edgar Galvan and Umberto Scafuri and Mengjie Zhang}, pages = {1888--1894}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {Neuroevolution}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664149}, size = {7 pages}, abstract = {Deep learning neuroarchitecture and hyperparameter search are important in finding the best configuration that maximizes learned model accuracy. However, the number of types of layers, their associated hyperparameters, and the myriad of ways to connect layers pose a significant computational challenge in discovering ideal model configurations. Here, we assess two different approaches for neuroarchitecture search for a LeNet style neural network, one that uses a fixed-length approach where there is a preset number of possible layers that can be toggled on or off via mutation, and a variable-length approach where layers can be freely added or removed via special mutation operators. We found that the variable-length implementation trained better models while discovering unusual layer configurations worth further exploration. This manuscript has been authored by UT-Battelle, LLC under Contract No. DE-AC05-00OR22725 with the U.S. Department of Energy. The United States Government retains and the publisher, by accepting the article for publication, acknowledges that the United States Government retains a non-exclusive, paid-up, irrevocable, world-wide license to publish or reproduce the published form of this manuscript, or allow others to do so, for United States Government purposes. 
The Department of Energy will provide public access to these results of federally sponsored research in accordance with the DOE Public Access Plan (http://energy.gov/downloads/doe-public-access-plan).}, notes = {GECCO-2024 NEWK A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(dorighello:2024:GECCOcomp, author = {Renato Sellaro Dorighello and Myriam Regattieri Delgado and Ricardo Lueders and Daniel Fernando Pigatto}, title = {{ACO-Pruning} for Deep Neural Networks: A Case Study in {CNNs}}, booktitle = {Neuroevolution at work}, year = {2024}, editor = {Ivanoe {De Falco} and Antonio Della Cioppa and Edgar Galvan and Umberto Scafuri and Mengjie Zhang}, pages = {1895--1903}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {swarm intelligence, pruning neural networks, convolutional neural network optimization, Neuroevolution}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664125}, size = {9 pages}, abstract = {Deep Neural Networks (DNNs) are successful in several tasks, mainly due to their ability to process a large volume of data, given their huge number of parameters and computational operations. Larger and deeper models have been developed to improve their performance with an increasing computational cost. Pruning algorithms are strategies necessary to mitigate the computational burden and achieve better performance by eliminating parts of the network structure while maintaining good training and testing results. Dynamic network pruning increases performance through online choices of inference paths depending on various inputs. This work proposes a new Ant Colony Optimization Pruning (ACO-P) algorithm for dynamic pruning based on swarm intelligence to compress the model without jeopardizing accuracy. We validate ACO-P with a CNN model on the MNIST dataset by comparing it with a baseline pruner that uses random choices, and a well-established dynamic pruning method based on a secondary neural network. The results show that our proposal is a computationally more efficient alternative, capable of achieving higher pruning rates.}, notes = {GECCO-2024 NEWK A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(el-saliby:2024:GECCOcomp, author = {Michel {El Saliby} and Giorgia Nadizar and Erica Salvato and Eric Medvet}, title = {Eventually, all you need is a simple evolutionary algorithm (for neuroevolution of continuous control policies)}, booktitle = {Neuroevolution at work}, year = {2024}, editor = {Ivanoe {De Falco} and Antonio Della Cioppa and Edgar Galvan and Umberto Scafuri and Mengjie Zhang}, pages = {1904--1913}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {neuroevolution, continuous control, policy search}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664112}, size = {10 pages}, abstract = {Artificial neural networks (ANNs) are a popular choice for tackling continuous control tasks due to their approximation abilities. 
When the ANN architecture is fixed, finding optimal weights becomes a numerical optimization problem, suitable for evolutionary algorithms (EAs), i.e., a form of neuroevolution. Here, we compare the performance of well-established EAs in solving neuroevolution problems, focusing on continuous control. We evaluate them on a set of navigation problems and a set of control problems based on modular soft robots. As a reference, we compare the same EAs on regression problems and classic numerical optimization benchmarks. Our findings suggest that simple EAs like genetic algorithm (GA) and differential evolution (DE) achieve good performance on control problems, even if they are surpassed by more sophisticated algorithms on benchmark problems. We hypothesize that the effectiveness of these simpler EAs stems from their use of crossover, which can be advantageous in the rugged fitness landscapes encountered in complex control tasks.}, notes = {GECCO-2024 NEWK A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(guijt:2024:GECCOcomp, author = {Arthur Guijt and Dirk Thierens and Tanja Alderliesten and Peter A. N. Bosman}, title = {Exploring the Search Space of Neural Network Combinations obtained with Efficient Model Stitching}, booktitle = {Neuroevolution at work}, year = {2024}, editor = {Ivanoe {De Falco} and Antonio Della Cioppa and Edgar Galvan and Umberto Scafuri and Mengjie Zhang}, pages = {1914--1923}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {neuroevolution, neural architecture search, stitching, ensembles}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664131}, size = {10 pages}, abstract = {Machine learning models can be made more performant and their predictions more consistent by creating an ensemble. Each neural network in an ensemble commonly performs its own feature extraction. These features are often highly similar, leading to potentially many redundant calculations. Unifying these calculations (i.e., reusing some of them) would be desirable to reduce computational cost. However, splicing two trained networks is non-trivial because architectures and feature representations typically differ, leading to a performance breakdown. To overcome this issue, we propose to employ stitching, which introduces new layers at crossover points. Essentially, a new network consisting of the two basis networks is constructed. In this network, new links between the two basis networks are created through the introduction and training of stitches. New networks can then be created by choosing which stitching layers to (not) use, thereby selecting a subnetwork. Akin to a supernetwork, assessing the performance of a selected subnetwork is efficient, as only their evaluation on data is required. We experimentally show that our proposed approach enables finding networks that represent novel trade-offs between performance and computational cost compared to classical ensembles, with some new networks even dominating the original networks.}, notes = {GECCO-2024 NEWK A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(kar:2024:GECCOcomp, author = {Devroop Kar and Zimeng Lyu and Alexander G. 
Ororbia and Travis Desell and Daniel Krutz}, title = {Enabling An Informed Contextual {Multi-Armed} Bandit Framework For Stock Trading With Neuroevolution}, booktitle = {Neuroevolution at work}, year = {2024}, editor = {Ivanoe {De Falco} and Antonio Della Cioppa and Edgar Galvan and Umberto Scafuri and Mengjie Zhang}, pages = {1924--1933}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {multi-armed bandits, recurrent neural networks, decision making, Neuroevolution}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664145}, size = {10 pages}, abstract = {Multi-armed bandits and contextual multi-armed bandits have demonstrated their proficiency in a variety of application areas. However, these models are highly susceptible to volatility and often exhibit knowledge gaps due to a limited understanding of future states. In this paper, we propose a new bandit framework for what we refer to as informed contextual multi-armed bandits (iCMABs) to mitigate these gaps, facilitating "informed" decisions based on predicted future contexts. The performance of an iCMAB is thus highly dependent on the accuracy of the forecast it uses. We examine the use of recurrent neural networks (RNNs) evolved through the EX-AMM neuroevolution algorithm as compared to other time series forecasting (TSF) methods and evaluate our iCMAB framework's ability to make stock market trading decisions for the Dow-Jones Index (DJI) in comparison to other decision making strategies using these forecasts. Our results demonstrate that an iCMAB, driven by evolved RNN architectures, performs better than statistical TSF methods, fixed architecture RNNs for TSF, and other CMAB methods. Using evolved RNNs, iCMAB is able to achieve the highest return of over 21\%, a $\sim$7\% improvement over not incorporating forecasted values, and a $\sim$5\% improvement over DJI's return for that time period.}, notes = {GECCO-2024 NEWK A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(bhandari:2024:GECCOcomp, author = {Shailendra Bhandari and Pedro Lencastre and Pedro Lind}, title = {Modeling stochastic eye tracking data: A comparison of quantum generative adversarial networks and Markov models}, booktitle = {Quantum Optimization}, year = {2024}, editor = {Alberto Moraglio and Francisco Chicano and Ofer Shir and Lee Spector and Matthieu Parizy and Markus Wagner}, pages = {1934--1941}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {quantum generative adversarial networks (QGANs), Markov models, eye-tracking data, Quantum Computing}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664134}, size = {8 pages}, abstract = {We explore the use of quantum generative adversarial networks (QGANs) for modeling eye movement velocity data. We assess whether the advanced computational capabilities of QGANs can enhance the modeling of complex stochastic distributions beyond the traditional mathematical models, particularly the Markov model. 
The findings indicate that while QGANs demonstrate potential in approximating complex distributions, the Markov model consistently outperforms in accurately replicating the real data distribution. This comparison underlines the challenges and avenues for refinement in time series data generation using quantum computing techniques. It emphasizes the need for further optimization of quantum models to better align with real-world data characteristics.}, notes = {GECCO-2024 QuantOpt A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(farhani:2024:GECCOcomp, author = {Yousra Farhani and Malak Saiem and Taha Arbaoui and Faicel Hnaien}, title = {Enhanced {QUBO} Formulations for The Flow Shop Scheduling Problem}, booktitle = {Quantum Optimization}, year = {2024}, editor = {Alberto Moraglio and Francisco Chicano and Ofer Shir and Lee Spector and Matthieu Parizy and Markus Wagner}, pages = {1942--1949}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {quantum annealing, QUBO, flow shop scheduling problem (FSP), traveling salesman problem (TSP), D-wave quantum annealer, combinatorial optimization, Quantum Computing}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664150}, size = {8 pages}, abstract = {In recent years, the Flow shop Scheduling Problem (FSP) has gained significant attention, prompting researchers to develop various methods, from conventional to hybrid optimization algorithms. Driven by the rapid growth of quantum computing capabilities, quantum approaches applied to optimization problems have attracted an increasing research effort. This work attempts to solve the permutation flow shop scheduling problem using quantum annealing by formulating the problem as a Quadratic Unconstrained Binary Optimization model (QUBO). Two QUBO formulations tailored to address the FSP are used. The first formulation is based on the position-based modelling while the second is based on five approximations of FSP as a Traveling Salesman Problem (TSP). The QUBO formulations are tested, using D-Wave's quantum annealers, on the well-known Taillard FSP benchmark and compared against each other. Results show that the proposed position-based QUBO reaches better solutions than the TSP-based QUBOs. 
This work is an attempt to highlight the increasing effectiveness of quantum annealers in addressing complex optimization problems and their limitations compared to conventional classical methods.}, notes = {GECCO-2024 QuantOpt A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(halffmann:2024:GECCOcomp, author = {Pascal Halffmann and Michael Trebing and Steve Lenk}, title = {Harnessing Inferior Solutions For Superior Outcomes: Obtaining Robust Solutions From Quantum Algorithms}, booktitle = {Quantum Optimization}, year = {2024}, editor = {Alberto Moraglio and Francisco Chicano and Ofer Shir and Lee Spector and Matthieu Parizy and Markus Wagner}, pages = {1950--1953}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {quantum computing, robust optimization, quantum optimization, quantum annealing, variational algorithms}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664160}, size = {4 pages}, abstract = {In the rapidly advancing domain of quantum optimization, the confluence of quantum algorithms such as Quantum Annealing (QA) and the Quantum Approximate Optimization Algorithm (QAOA) with robust optimization methodologies presents a cutting-edge frontier. Although it seems natural to apply quantum algorithms when facing uncertainty, this has barely been approached. In this paper, we adapt the aforementioned quantum optimization techniques to tackle robust optimization problems. By leveraging the inherent stochasticity of quantum annealing and adjusting the parameters and evaluation functions within QAOA, we present two innovative methods for obtaining robust optimal solutions. These heuristics are applied to two use cases within the energy sector: the unit commitment problem, which is central to the scheduling of power plant operations, and the optimization of charging electric vehicles, including electricity from photovoltaics, to minimize costs. These examples highlight not only the potential of quantum optimization methods to enhance decision-making in energy management but also the practical relevance of the young field of quantum computing in general. 
Through careful adaptation of quantum algorithms, we lay the foundation for exploring ways to achieve more reliable and efficient solutions in complex optimization scenarios that occur in the real-world.}, notes = {GECCO-2024 QuantOpt A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(kameyama:2024:GECCOcomp, author = {Hiroaki Kameyama and Yutaka Takita and Matthieu Parizy and Shinichi Sazawa and Pablo Caballero}, title = {Benchmarks for Digital Annealer with Quadratic Constrained Binary Optimization Problems}, booktitle = {Quantum Optimization}, year = {2024}, editor = {Alberto Moraglio and Francisco Chicano and Ofer Shir and Lee Spector and Matthieu Parizy and Markus Wagner}, pages = {1954--1957}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {combinatorial optimization, binary quadratic programming problem, quadratic unconstrained binary optimization, digital annealer, Quantum Computing}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664114}, size = {4 pages}, abstract = {In this paper, we evaluate the performance of Fujitsu's Digital Annealer, an original computing technology inspired by quantum phenomena and developed to solve practical combinatorial optimization problems, using the public benchmark QPLIB. As a result of the performance evaluation, it was confirmed that the best published solution was obtained in more than 90\% of the instances at high speed.}, notes = {GECCO-2024 QuantOpt A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(muecke:2024:GECCOcomp, author = {Sascha Muecke}, title = {A Simple {QUBO} Formulation of Sudoku}, booktitle = {Quantum Optimization}, year = {2024}, editor = {Alberto Moraglio and Francisco Chicano and Ofer Shir and Lee Spector and Matthieu Parizy and Markus Wagner}, pages = {1958--1962}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {Quantum Computing}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664106}, size = {5 pages}, abstract = {This article describes how to solve Sudoku puzzles using Quadratic Unconstrained Binary Optimization (QUBO). To this end, a QUBO instance with 729 variables is constructed, encoding a Sudoku grid with all constraints in place, which is then partially assigned to account for clues. The resulting instance can be solved with a Quantum Annealer, or any other strategy, to obtain the fully filled-out Sudoku grid. Moreover, as all valid solutions have the same energy, the QUBO instance can be used to sample uniformly from the space of valid Sudoku grids. 
We demonstrate the described method using both a heuristic solver and a Quantum Annealer.}, notes = {GECCO-2024 QuantOpt A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(sugimura:2024:GECCOcomp, author = {Masahiko Sugimura and Matthieu Parizy}, title = {{A3TUM:} Automated Tabu Tenure Tuning by Unique Move for Quadratic Unconstrained Binary Optimization}, booktitle = {Quantum Optimization}, year = {2024}, editor = {Alberto Moraglio and Francisco Chicano and Ofer Shir and Lee Spector and Matthieu Parizy and Markus Wagner}, pages = {1963--1971}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {optimization, tabu search, tabu tenure, quadratic unconstrained binary optimization, QUBO, Quantum Computing}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664111}, size = {9 pages}, abstract = {Tabu search is one of the promising solving methods for the quadratic unconstrained binary optimization (QUBO) problem. A critical parameter of tabu search is tabu tenure, which balances the behavior of the solver between greedy and exploratory. Our goal is to establish a way to find a tabu tenure suitable for each problem instance in QUBO formulation and utilize it to enhance solver performance. In order to achieve the goal, we proposed a new metric, unique move, which is the number of unique decision variables changed, and studied its behavior in several problem instances in QUBO formulation. Based on the study, our proposed method named A3TUM was designed to take a suitable sub-method for each problem instance by classifying problems into typical patterns based on the behavior of unique move. Experimental results show the effectiveness of the method both in estimation accuracy of tabu tenure and in solving performance compared to a robust tabu search-based method as a baseline.}, notes = {GECCO-2024 QuantOpt A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(unterauer:2024:GECCOcomp, author = {Arnold Unterauer and David Bucher and Matthias Knoll and Constantin Economides and Michael Lachner and Thomas Germain and Moritz Kessel and Smajo Hajdinovic and Jonas Stein}, title = {Solving the Turbine Balancing Problem using Quantum Annealing}, booktitle = {Quantum Optimization}, year = {2024}, editor = {Alberto Moraglio and Francisco Chicano and Ofer Shir and Lee Spector and Matthieu Parizy and Markus Wagner}, pages = {1972--1978}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {quantum computing, quantum annealing, aerospace, turbine balancing problem}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664107}, size = {7 pages}, abstract = {Quantum computing has the potential for disruptive change in many sectors of industry, especially in materials science and optimization. In this paper, we describe how the turbine balancing problem can be solved with quantum computing, which is the NP-hard optimization problem of analytically balancing rotor blades in a single plane as found in turbine assembly. 
Small yet relevant instances occur in industry, which makes the problem interesting for early quantum computing benchmarks. We model it as a quadratic unconstrained binary optimization problem and compare the performance of a classical rule-based heuristic and D-Wave Systems' quantum annealer Advantage\_system4.1. In this case study, we use real-world as well as synthetic datasets and observe that the quantum hardware significantly improves an actively used heuristic's solution for small-scale problem instances with bare disk imbalance in terms of solution quality. Motivated by this performance gain, we subsequently design a quantum-inspired classical heuristic based on simulated annealing that achieves very good results on all given problem instances, essentially solving the optimization problem sufficiently well for all considered datasets, according to industrial requirements.}, notes = {GECCO-2024 QuantOpt A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(vargas:2024:GECCOcomp, author = {Axel Vargas and Pradyumn Shukla and Richard Allmendinger and Andreas Jaeger}, title = {On Solving the Capacitated Vehicle Routing Problem with Time Windows using Quantum Annealing}, booktitle = {Quantum Optimization}, year = {2024}, editor = {Alberto Moraglio and Francisco Chicano and Ofer Shir and Lee Spector and Matthieu Parizy and Markus Wagner}, pages = {1979--1983}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {capacitated vehicle routing problem with time windows, quantum annealing, optimization algorithms, scheduling algorithms, operations research, network optimization, Quantum Computing}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664139}, size = {5 pages}, abstract = {This study explores Quantum Annealing (QA) versus Classical Computing (CC) approaches in solving the Capacitated Vehicle Routing Problem with Time Windows (CVRPTW), using real-world logistics data from Siemens Advanta. By modeling the problem as a Service Network Design (SND) with binary variables and framing CVRPTW within a quantum mechanical paradigm suitable for QA, this approach leverages quantum annealing in a novel manner to tackle the complex optimization challenges inherent in the VRP. 
Contrasting it with the performance of the traditional branch and cut solver---a specific CC technique---this research unveils the prospective benefits and existing challenges of QA in complex optimization scenarios, thereby emphasizing the need for advancing quantum computing.}, notes = {GECCO-2024 QuantOpt A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(zielinski:2024:GECCOcomp, author = {Sebastian Zielinski and Maximilian Zorn and Thomas Gabor and Sebastian Feld and Claudia Linnhoff-Popien}, title = {Using an Evolutionary Algorithm to Create {(MAX)-3SAT} {QUBOs}}, booktitle = {Quantum Optimization}, year = {2024}, editor = {Alberto Moraglio and Francisco Chicano and Ofer Shir and Lee Spector and Matthieu Parizy and Markus Wagner}, pages = {1984--1992}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {QUBO, (MAX)-3SAT, combinatorial optimization, evolutionary algorithm, Quantum Computing}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664153}, size = {9 pages}, abstract = {A common way of solving satisfiability instances with quantum methods is to transform these instances into instances of QUBO. State-of-the-art transformations from MAX-3SAT to QUBO work by mapping clauses of a 3SAT formula associated with the MAX-3SAT instance to an instance of QUBO and combining the resulting QUBOs into a single QUBO instance representing the whole MAX-3SAT instance. As creating these transformations is currently done manually or via exhaustive search methods and is, therefore, algorithmically inefficient, we see potential for including search-based optimization. In this paper, we propose two methods of using evolutionary algorithms to create QUBO representations of MAX-3SAT problems automatically. We evaluate our created QUBOs on 500 and 1000-clause 3SAT formulae and find competitive performance to state-of-the-art baselines when using both classical and quantum annealing solvers.}, notes = {GECCO-2024 QuantOpt A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(halsema:2024:GECCOcomp, author = {Martijn Halsema and Diederick Vermetten and Thomas Baeck and Niki {Van Stein}}, title = {A Critical Analysis of Raven Roost Optimization}, booktitle = {Swarm Intelligence Algorithms: Foundations, Perspectives and Challenges}, year = {2024}, editor = {Roman Senkerik and Pavel Kromer}, pages = {1993--2001}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664124}, size = {9 pages}, abstract = {This study critically examines the Raven Roost Optimization (RRO) algorithm within the broader context of nature-inspired metaheuristics, challenging its novelty and efficacy in the field of black-box optimization. Many similar methods use ideas from nature, but it's important to see if they really bring something new to the table. We compared RRO with another well-known method called Particle Swarm Optimization (PSO) to see how well RRO works and if it's truly a new idea. 
Through comprehensive analysis and benchmarking, we reveal that RRO's purported novelty largely recapitulates existing strategies under a new metaphorical guise. The algorithm's performance is systematically evaluated across various dimensions, revealing inherent limitations and biases introduced by its metaphorical foundation. Our findings advocate for a critical reassessment of metaphor-based heuristics, urging the computational intelligence community to prioritize substantive algorithmic advancements over superficial novelty. The call for rigorous evaluation and validation of new optimization methods is underscored, emphasizing the need for transparency, reproducibility, and genuine innovation in algorithmic design. This work contributes to the ongoing discourse on the validation and merit of bio-inspired algorithms, providing insights that may guide future research towards more meaningful and empirically justified contributions.}, notes = {GECCO-2024 SWINGA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(helbig:2024:GECCOcomp, author = {Marde Helbig}, title = {Visualising Found Solutions and Measures for Dynamic Multi-objective Optimisation}, booktitle = {Swarm Intelligence Algorithms: Foundations, Perspectives and Challenges}, year = {2024}, editor = {Roman Senkerik and Pavel Kromer}, pages = {2002--2009}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {dynamic multi-objective optimisation, visualisation, performance measures, pareto-optimal front, search}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664151}, size = {8 pages}, abstract = {Dynamic multi-objective optimisation problems (DMOPs) are dynamic in nature where at least two objectives are in conflict with one another and the objectives and/or constraints change over time. Dynamic multi-objective optimisation algorithms (DMOAs) have to find solutions in a short time before the environment changes, after which the found solutions may not be optimal anymore. Before applying a DMOA to real-world problems it is important to understand the behaviour of the algorithm under various environment changes and when irregular changes occur. Typically plots of the DMOA's found trade-off solutions or Pareto-optimal fronts (POFs) and their corresponding performance measure values are plotted over time. However, this makes it difficult to analyse the behaviour of the DMOA's population during the search process and especially after changes occurred. 
This paper proposes a visualisation approach to assist with the analysis of a DMOA's behaviour during the search by incorporating the found POF, the reference POF, performance measure values and when changes occur into one plot that can be viewed for each iteration of the search process.}, notes = {GECCO-2024 SWINGA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(noborio:2024:GECCOcomp, author = {Kojiro Noborio and Noriyuki Fujimoto}, title = {Solving {QUBO} with {MOPSO} and Decomposition}, booktitle = {Swarm Intelligence Algorithms: Foundations, Perspectives and Challenges}, year = {2024}, editor = {Roman Senkerik and Pavel Kromer}, pages = {2010--2017}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {combinatorial optimization, QUBO, multi-objective particle swarm optimization}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664167}, size = {8 pages}, abstract = {Many practically important combinatorial optimization problems have been found to be reducible to Quadratic Unconstrained Binary Optimization (QUBO) problems. Solving QUBO problems can consequently solve many other important combinatorial optimization problems. Fujimoto and Nanai, in their 2021 presentation at the GECCO Workshop PDEIM, proposed a method to reduce QUBO into a bi-objective boundary constrained continuous optimization problem and then solve it using Multi-Objective Particle Swarm Optimization (MOPSO). This paper presents an improved MOPSO method with decomposition inspired by the idea of the Multi-Objective Evolutionary Algorithm based on Decomposition (MOEA/D), which divides multi-objective optimization problems into subproblems of single-objective optimization. We propose an enhanced method that modifies Fujimoto and Nanai's approach using this improved MOPSO. Furthermore, we experimentally compare its performance with methods that use other representative multi-objective optimization algorithms in place of MOPSO for Fujimoto and Nanai's approach, and with methods that directly solve QUBO using standard single-objective optimization algorithms. 
The experimental results show that, for larger-sized problems, both the convergence speed and the accuracy of the obtained QUBO solutions are improved by the proposed method.}, notes = {GECCO-2024 SWINGA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(pluhacek:2024:GECCOcomp3, author = {Michal Pluhacek and Jozef Kovac and Adam Viktorin and Peter Janku and Tomas Kadavy and Roman Senkerik}, title = {Using {LLM} for Automatic Evolvement of Metaheuristics from Swarm Algorithm {SOMA}}, booktitle = {Swarm Intelligence Algorithms: Foundations, Perspectives and Challenges}, year = {2024}, editor = {Roman Senkerik and Pavel Kromer}, pages = {2018--2022}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary computation, large language models, metaheuristic optimization, self-organizing migrating algorithm, automatic algorithm design, GPT}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664181}, size = {5 pages}, abstract = {This study investigates the use of GPT-4 Turbo, a large language model, to enhance the Self-Organizing Migrating Algorithm (SOMA), specifically its All to All variant (SOMA-ATA). Utilizing the model's extensive context capacity for iterative prompting without feedback, we sought to autonomously generate superior algorithmic versions. Contrary to our initial hypothesis, the improvements did not progress linearly. Nevertheless, one iteration stood out significantly, consistently outperforming the baseline across various pairwise comparisons and showing a robust performance profile. This iteration's structure deviated substantially from traditional SOMA principles, underscoring the potential of large language models to create distinctive and effective algorithmic strategies. The results affirm the methodology's ability to produce high-performing algorithms without expert intervention, setting the stage for future research to integrate feedback mechanisms and conduct detailed code analyses to understand further the modifications made by the Large Language Models.}, notes = {GECCO-2024 SWINGA A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(krawczyk:2024:GECCOcomp, author = {Konrad Krawczyk and Jaroslaw Arabas}, title = {Empirical Study of Surrogate Model Assisting {JADE:} Relation Between the Model Accuracy and the Optimization Efficiency}, booktitle = {Workshop on Surrogate-Assisted Evolutionary Optimisation}, year = {2024}, editor = {Jonathan Fieldsend and Handing Wang and Yaochu Jin and Tinkle Chugh}, pages = {2023--2031}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {evolutionary algorithms, differential evolution, surrogate models, optimization}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664119}, size = {9 pages}, abstract = {This paper presents an empirical study of the combination of surrogate models with the JADE algorithm. It focuses on the connection between the accuracy of surrogate models and optimization efficiency. 
Our study uses surrogate models to approximate costly fitness functions that assist the optimization process. Our analysis primarily focuses on how the accuracy of surrogate models affects the effectiveness of the JADE algorithm in a search space when searching for optimal solutions. Experiments were performed using various surrogate models. The effectiveness of the different surrogate models in combination with JADE was tested on the CEC2013 benchmark. Our observations provide insights into how the dynamics of the error of surrogate models look and what their correlation with optimization efficiency is. Cases were also observed where models with higher error rates paradoxically contribute to better optimization results. Our findings reveal that higher surrogate model accuracy does not necessarily equate to more effective optimization. Instead, a model's ability to preserve the hierarchical order of solutions, which helps with effective mutation and selection, is also an important factor. The study underlines the importance of evaluating surrogate models not only on their approximation errors but also on their ability to maintain accurate rankings.}, notes = {GECCO-2024 SAEOpt A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(rahi:2024:GECCOcomp, author = {Kamrul Rahi and Hemant Singh and Tapabrata Ray}, title = {Towards solving expensive optimization problems with heterogeneous constraint costs}, booktitle = {Workshop on Surrogate-Assisted Evolutionary Optimisation}, year = {2024}, editor = {Jonathan Fieldsend and Handing Wang and Yaochu Jin and Tinkle Chugh}, pages = {2032--2040}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {expensive constrained optimization, selective evaluation, heterogeneous evaluation cost}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664105}, size = {9 pages}, abstract = {Expensive constrained optimization problems (ECOPs) are prevalent in practical applications. Surrogate-assisted constrained evolutionary algorithms (SACEAs) are commonly used to solve ECOPs. Existing SACEAs may assess candidate solutions through so-called full evaluations, where all objectives and constraints are unconditionally evaluated, or partial evaluations, where only a subset of them is evaluated. A challenging aspect of the latter category is dealing with cases where individual objective and/or constraint evaluations have different evaluation costs (referred to as heterogeneous costs). While some recent research has been conducted on heterogeneous objectives, scarce attention has been paid to problems involving heterogeneous constraints. Towards addressing this gap, this paper investigates the impact of heterogeneous constraint evaluation costs on algorithm performance. An existing approach (SParEA) that uses partial constraint evaluations is enhanced through a simple update that accounts for the individual constraint costs. The updated algorithm is compared to its base version and its full evaluation version. 
Numerical experiments are conducted on four systematically constructed types of problems involving different constraint evaluation scenarios to quantitatively demonstrate the benefits of the proposed approach.}, notes = {GECCO-2024 SAEOpt A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(de-melo:2024:GECCOcomp, author = {Vinicius Veloso {De Melo} and Wolfgang Banzhaf and Giovanni Iacca}, title = {Accelerating {GP} Genome Evaluation Through Real Compilation with a Multiple Program Single Data Approach}, booktitle = {Symbolic Regression}, year = {2024}, editor = {William {La Cava} and Steven Gustafson}, pages = {2041--2049}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, multiple program single data}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664168}, size = {9 pages}, abstract = {Genetic Programming (GP) presents a unique challenge in fitness evaluation due to the need to repeatedly execute the evolved programs, often represented as tree structures, to assess their quality on multiple input data. Traditional approaches rely on interpreting these program trees, which can be computationally expensive. This paper proposes an optimization method that leverages code generation using a novel strategy and Just-In-Time (JIT) compilation to significantly improve the efficiency of fitness evaluation in GP. We propose to revisit using an actual compiler to transform a GP individual into native computer code executable quickly on the CPU. Our GP implementation is a simple tree-based approach that makes it easy for researchers to experiment with, while the evaluation function shows high performance. Preliminary results in Symbolic Regression on artificial datasets demonstrate that our approach is more than 6\texttimes{} faster than a popular GP framework that uses high-performance single-function evaluation but in a stack-based interpreter mode.}, notes = {GECCO-2024 SymReg A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(dick:2024:GECCOcomp, author = {Grant Dick and Caitlin Owen}, title = {Characterising the Double Descent of Symbolic Regression}, booktitle = {Symbolic Regression}, year = {2024}, editor = {William {La Cava} and Steven Gustafson}, pages = {2050--2057}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, symbolic regression, bias-variance tradeoff, double descent}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664176}, size = {8 pages}, abstract = {Recent work has argued that many machine learning techniques exhibit a 'double descent' in model risk, where increasing model complexity beyond an interpolation zone can overcome the bias-variance tradeoff to produce large, over-parameterised models that generalise well to unseen data. While the double descent characteristic has been identified in many learning methods, it has not been explored within symbolic regression research. 
This paper presents an initial exploration into the presence of double descent behaviour in symbolic regression over a range of parameter settings. Results suggest that symbolic regression via genetic programming does not exhibit a clear double descent risk curve relative to model size or function set. Unlike other methods, models evolved through symbolic regression do not appear to strongly interpolate training data, which promotes a degree of robustness towards noise in training data. However, models evolved by symbolic regression can still be large and do not present a strong overfitting characteristic. Given that a prime motivation for symbolic regression is to produce compact interpretable models, these results suggest that methods aimed at regularising evolved models should be a key feature of all symbolic regression methods.}, notes = {GECCO-2024 SymReg A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(leser:2024:GECCOcomp, author = {Patrick Leser and Geoffrey Bomarito and Gabriel Kronberger and Fabricio {Olivetti De Franca}}, title = {Comparing Methods for Estimating Marginal Likelihood in Symbolic Regression}, booktitle = {Symbolic Regression}, year = {2024}, editor = {William {La Cava} and Steven Gustafson}, pages = {2058--2066}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, symbolic regression, model selection, equation learning, marginal likelihood}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664142}, size = {9 pages}, abstract = {Marginal likelihood has been proposed as a genetic programming-based symbolic regression (GPSR) fitness metric to prevent overly complex expressions and overfitting, particularly when data is limited and noisy. Here, two particular methods for estimating marginal likelihood - the Laplace approximation and sequential Monte Carlo - are studied with a focus on tradeoffs between accuracy and computational efficiency. The comparison focuses on practical challenges in the context of two sets of example problems. First, the methods are compared on handcrafted expressions exhibiting nonlinearity and multimodality in their respective posterior distributions. Next, the methods are compared on a real-world set of equations produced by GPSR using training data from a well-known symbolic regression benchmark. A key finding is that there are potentially significant differences between the methods that, for example, could lead to conflicting selection of expressions within a GPSR implementation. However, it is concluded that there are scenarios where either method could be preferred over the other based on accuracy or computational budget. 
Algorithmic improvements for both methods as well as future areas of study are discussed.}, notes = {GECCO-2024 SymReg A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(liu:2024:GECCOcomp3, author = {Hongyu Liu and Haoyang Liu and Yufei Kuang and Jie Wang and Bin Li}, title = {Deep Symbolic Optimization for Combinatorial Optimization: Accelerating Node Selection by Discovering Potential Heuristics}, booktitle = {Symbolic Regression}, year = {2024}, editor = {William {La Cava} and Steven Gustafson}, pages = {2067--2075}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {combinatorial optimization, machine learning, deep symbolic regression, branch-and-bound solver, node selection}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664128}, size = {9 pages}, abstract = {Combinatorial optimization (CO) is one of the most fundamental mathematical models in real-world applications. Traditional CO solvers, such as Branch-and-Bound (B&B) solvers, heavily rely on expert-designed heuristics, which are reliable but require substantial manual tuning. Recent studies have leveraged deep learning (DL) models as an alternative to capture rich feature patterns for improved performance on GPU machines. Nonetheless, the drawbacks of high training and inference costs, as well as limited interpretability, severely hinder the adoption of DL methods in real-world applications. To address these challenges, we propose a novel deep symbolic optimization learning framework that combines their advantages. Specifically, we focus on the node selection module within B&B solvers---namely, deep symbolic optimization for node selection (Dso4NS). With data-driven approaches, Dso4NS guides the search for mathematical expressions within the high-dimensional discrete symbolic space and then incorporates the highest-performing mathematical expressions into a solver. The data-driven model captures the rich feature information in the input data and generates symbolic expressions, while the expressions deployed in solvers enable fast inference with high interpretability. Experiments demonstrate the effectiveness of Dso4NS in learning high-quality expressions, outperforming existing approaches on a CPU machine. Encouragingly, the learned CPU-based policies consistently achieve performance comparable to state-of-the-art GPU-based approaches.}, notes = {GECCO-2024 SymReg A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(raghav:2024:GECCOcomp, author = {S. Sanjith Raghav and S. Tejesh Kumar and Rishiikesh Balaji and M. Sanjay and C. 
Shunmuga}, title = {Interactive Symbolic Regression - A Study on Noise Sensitivity and Extrapolation Accuracy}, booktitle = {Symbolic Regression}, year = {2024}, editor = {William {La Cava} and Steven Gustafson}, pages = {2076--2082}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, interactive symbolic regression, gplearn, user-interactive search and interactive evolutionary computation}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664130}, size = {7 pages}, abstract = {This paper presents an interactive symbolic regression framework, i-gplearn, which extends the popular Python Symbolic Regression library gplearn with user interactivity. Predominantly, all the Symbolic Regression (SR) algorithms focus on best model fit despite the fact that the final objective is more about scientific insight for the user. Consequently, the possibility of relying on human intuition and expertise for exploratory discovery of models has greater potential in the discovery of relevant and useful mathematical expressions. i-gplearn combines both user score and gplearn's model accuracy. Recent benchmark studies have demonstrated that the state-of-the-art SR approaches have difficulty in solving problems sensitive to noise and problems demanding extrapolation accuracy. i-gplearn interactive experiments with 11 users have been demonstrated on the two tasks - extrapolation accuracy and sensitivity to noise - conducted as part of the Symbolic Regression competition hosted at the 2022 Genetic and Evolutionary Computation Conference (GECCO). The user studies showed competitive performance of i-gplearn against the baseline gplearn in the sensitivity-to-noise task. In the case of extrapolation accuracy, the user interaction studies showed both huge potential in identifying diverse expressions and the challenges of interactive experiments.}, notes = {GECCO-2024 SymReg A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(bazhenov:2024:GECCOcomp, author = {Egor Bazhenov and Ivan Jarsky and Valeria Efimova and Sergey Muravyov}, title = {Evolutionary image vectorization with variable curve number}, booktitle = {GECCO Student Workshop}, year = {2024}, editor = {Amir H Gandomi}, pages = {2083--2086}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {image vectorization, evolutionary algorithm, machine learning, vector graphics}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664171}, size = {4 pages}, abstract = {Vector and raster graphics are the two main types of 2D graphics. Raster graphics operate on images consisting of pixels (dots); vector images are created using mathematical objects such as lines, curves, and shapes. The main advantage of vector graphics is that they can be scaled without loss of quality, which is useful for advertising, design, frontend development, and other fields of application. At the moment, the issue of vectorization (conversion from raster to vector graphics) has not been fully resolved. 
Existing approaches are not able to work with a color gradient, and they have other disadvantages, such as artifacts, inaccurate result correspondence to the original raster image, and extremely long processing time. To solve these problems, we propose an evolutionary algorithm for image vectorization. The main idea of our approach is to iteratively improve vector images using mutations and crossovers. The algorithm itself does not require any parameters, such as curve number, other than the original image, which makes it universal. The results of comparison with existing solutions show that our algorithm qualitatively and quickly vectorizes images, outperforms others in terms of pixel-by-pixel MSE by 15\% and does not generate unnecessary shapes in the resulting vector image.}, notes = {GECCO-2024 Student Workshop A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(chen:2024:GECCOcomp5, author = {Siyuan Chen and Wei Fang and Shuwei Zhu}, title = {{TextNet:} An Neural Architecture Search Method based on Rapid Text Processing Network Structure Analysis}, booktitle = {GECCO Student Workshop}, year = {2024}, editor = {Amir H Gandomi}, pages = {2087--2090}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {neural architecture analysis, neural architecture search, evolutionary algorithm, multi-objective optimization}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664182}, size = {4 pages}, abstract = {The evolutionary algorithm (EA) is well-suited for solving neural architecture search (NAS) problems; however, most existing multi-objective evolutionary NAS algorithms (MOEA-NAS) have limited their search space to a pre-defined collection of network modules, which could not effectively express the best direction for evolution. To overcome this barrier, TextNet is proposed to automatically search for any possible efficient network structure module by utilizing a fast network structure analysis method. A list of recorded neural network modules is generated in each generation, whose dynamically evaluated efficiency values are passed to the search process. TextNet can effectively complement existing EA-based NAS algorithms, and tests with three representative EMO algorithms show that our method can empower other EMO algorithms to gain better performance. 
An open-source implementation is available.}, notes = {GECCO-2024 Student Workshop A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(dos-santos:2024:GECCOcomp, author = {Paulo {Dos Santos} and Marcella Scoczynski and Solange {Amorim Nogueira} and Rafael {Maffei Loureiro} and Cristhiane Goncalves and Wesley {Pacheco Calixto}}, title = {A novel unsupervised segmentation approach for brain computed tomography employing hyperparameter optimization}, booktitle = {GECCO Student Workshop}, year = {2024}, editor = {Amir H Gandomi}, pages = {2091--2094}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {medical image processing, brain computed tomography scans, network hyperparameter optimization}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664126}, size = {4 pages}, abstract = {This work proposes a methodology for segmenting structures and brain tissues in computed tomography scans using unsupervised deep learning. The methodology involves extracting features from the CT scans and applying similarity and continuity constraints, creating segmentation maps of intracranial structures and observable tissues. This approach can assist experts in diagnosis by identifying specific regions with anomalies. The method is applied to a database of real scans and uses a spatial continuity evaluation function directly related to the desired quantity of structures. Results demonstrate that the proposed unsupervised methodology achieves segmentation of the desired number of labels and allows for a reduction in effort compared to supervised segmentation models.}, notes = {GECCO-2024 Student Workshop A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(gharoun:2024:GECCOcomp, author = {Hassan Gharoun and Mohammad Sadegh Khorshidi and Navid Yazdanjue and Fang Chen and Amir H. Gandomi}, title = {Shadow Gene Guidance: A Novel Approach for Elevating Genetic Programming Classifications and Boosting Predictive Confidence}, booktitle = {GECCO Student Workshop}, year = {2024}, editor = {Amir H Gandomi}, pages = {2095--2098}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, crossover, uncertainty-aware classification}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664175}, size = {4 pages}, abstract = {This paper introduces a novel classification method that utilizes genetic programming (GP). The primary purpose of the proposed method is to enhance future generations of GP by continuously refining the genetic makeup of the population for improved classification results. Accordingly, this paper develops the novel method by modifying the Boruta feature selection method in such a way that it allows evaluating the significance of individuals' genes. This method creates modified versions of the genes, called "shadow genes", evaluates the genes' impact on model performance by competing them against the shadow genes, and identifies key genes. These key genes are then used to enhance future generations.
The obtained results demonstrated that the proposed method not only enhances the fitness of the individuals but also steers the population toward optimal solutions. Furthermore, empirical validation on multiple datasets reveals that the proposed method significantly outperforms classic GP models in both accuracy and prediction-entropy reduction, showcasing its superior ability to generate confident and reliable predictions.}, notes = {GECCO-2024 Student Workshop A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(khorshidi:2024:GECCOcomp, author = {Mohammad Sadegh Khorshidi and Navid Yazdanjue and Hassan Gharoun and Danial Yazdani and Mohammad Reza Nikoo and Fang Chen and Amir H. Gandomi}, title = {Enhancing Classification Through Multi-view Synthesis in {Multi-Population} Ensemble Genetic Programming}, booktitle = {GECCO Student Workshop}, year = {2024}, editor = {Amir H Gandomi}, pages = {2099--2102}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, pattern recognition and classification, multipopulation models, multi-view ensemble learning}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664172}, size = {4 pages}, abstract = {This study proposes a genetic programming (GP) approach for classification, integrating cooperative co-evolution with multi-view synthesis. Addressing the challenges of high-dimensional data, we enhance GP by distributing features across multiple populations, each evolving concurrently and cooperatively. Akin to multi-view ensemble learning, the segmentation of the feature set improves classifier performance by processing disparate data "views". Individuals comprise multiple genes, with a SoftMax function synthesizing gene outputs. An ensemble method combines decisions across individuals from different populations, augmenting classification accuracy and robustness. Instead of exploring the entire search space, this ensemble approach divides the search space into multiple smaller subspaces that are easier to explore and ensures that each population specializes in different aspects of the problem space.
Empirical tests on multiple datasets show that the classifier obtained from the proposed approach outperforms the one obtained from a single-population GP executed on the entire feature set.}, notes = {GECCO-2024 Student Workshop A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(leite-richardson:2024:GECCOcomp, author = {Frederico {Leite Richardson} and George {De Ath} and Tinkle Chugh}, title = {Is greed still good in multi-objective Bayesian optimisation?}, booktitle = {GECCO Student Workshop}, year = {2024}, editor = {Amir H Gandomi}, pages = {2103--2106}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {surrogate modelling, gaussian process, multi-objective optimisation, greed is good, epsilon-greedy, uncertainty quantification}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664189}, size = {4 pages}, abstract = {Bayesian optimisation (BO) is a popular tool for solving expensive optimisation problems. BO utilises Bayesian models and balances exploitation and exploration in searching for potential solutions. In this work, we investigate the trade-off between exploration and exploitation in multi-objective BO by comparing three different approaches: selecting points on the estimated Pareto front (PF) of the predicted values of the surrogate models, selecting points on the estimated PF of the predicted uncertainty of the models, and using an epsilon-greedy approach to balance between the two PFs. We evaluate the performance of these approaches on a set of benchmark problems and compare them to a random baseline and expected hypervolume improvement (EHVI). It was found that the epsilon-greedy and fully exploitative approaches were the best performing across all problem dimensionalities and that the performance of EHVI comparatively decreased as the dimensionality of the problem increased.}, notes = {GECCO-2024 Student Workshop A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(rambaldi-migliore:2024:GECCOcomp, author = {Chiara Camilla {Rambaldi Migliore} and Omar {Rota Stabelli} and Giovanni Iacca}, title = {Balancing human livability and bumblebee population in urban green areas via {Agent-Based} Model and Evolutionary Algorithms}, booktitle = {GECCO Student Workshop}, year = {2024}, editor = {Amir H Gandomi}, pages = {2107--2110}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {agent-based model, multi-objective optimization, ecology}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664143}, size = {4 pages}, abstract = {We present an agent-based model (ABM) for simulating the ecological dynamics within urban parks, specifically focusing on bumblebee demography and conservation. The objective of the model is to simulate the impact of park management on the population of new bumblebee queens. The ABM features simulated agents for plants, bumblebees, and bumblebee colonies, incorporating insights from a literature review on bumblebee behavior and plant ecology.
It operates over a three-year simulation period, during which the effects of four key parameters are explored, namely the percentage and the shape of unmowed areas, the mowing frequency, and pesticide treatments. The significance of the ABM becomes evident in estimating the impact of various park management strategies on the bumblebee population. By employing NSGA-II, we showcase a practical application of the proposed ABM, generating a Pareto front that delineates the trade-offs between park livability and the anticipated number of new queens. We also investigate how different solution representations yield different Pareto fronts and how different combinations of initialization methods, population size, and number of generations influence the distribution of the final population. Overall, the proposed approach can offer valuable insights to urban environment managers, aiding in the delicate balance between park livability and biodiversity conservation.}, notes = {GECCO-2024 Student Workshop A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(newbold:2024:GECCOcomp, author = {Thomas Newbold and Alberto Moraglio}, title = {Evolving Quantum Logic Gate Circuits in Qiskit}, booktitle = {GECCO Student Workshop}, year = {2024}, editor = {Amir H Gandomi}, pages = {2111--2114}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, evolutionary algorithms, quantum logic gates, quantum fourier transform, qiskit}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664108}, size = {4 pages}, abstract = {The emergence of quantum computers represents a crucial leap forward in practical computability, when compared to classical architectures. Harnessing that power effectively is an exercise of increasing importance. Despite research in this field expanding rapidly, little headway has been made towards new quantum algorithms. The complexity of quantum systems makes them conceptually inaccessible to non-experts; therefore, programs are hard to design, hindering advancement. This paper presents a method for genetically encoding and designing arbitrary circuits in Qiskit, a software library developed by IBM for simulating quantum logic gates. 
The effectiveness of the system is verified by evolving solutions to the Toffoli gate and Quantum Fourier Transform problems.}, notes = {GECCO-2024 Student Workshop A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(song:2024:GECCOcomp, author = {Junhao Song and Yingfang Yuan and Wei Pang}, title = {{SAIS:} A Novel {Bio-Inspired} Artificial Immune System Based on Symbiotic Paradigm}, booktitle = {GECCO Student Workshop}, year = {2024}, editor = {Amir H Gandomi}, pages = {2115--2118}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {artificial immune systems, computational intelligence, benchmark problems}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664188}, size = {4 pages}, abstract = {We propose a novel type of Artificial Immune System (AIS): Symbiotic Artificial Immune Systems (SAIS), drawing inspiration from symbiotic relationships in biology. SAIS parallels the three key stages (i.e., mutualism, commensalism and parasitism) of population updating from the Symbiotic Organisms Search (SOS) algorithm. This parallel approach effectively addresses the challenges of large population size and enhances population diversity in AIS, which traditional AIS and SOS struggle to resolve efficiently. We conducted a series of experiments, which demonstrated that our SAIS achieved comparable performance to the state-of-the-art approach SOS and outperformed other popular AIS approaches and evolutionary algorithms across 26 benchmark problems. Furthermore, we investigated the problem of parameter selection and found that SAIS performs better in handling a relatively large population size while requiring fewer generations. Finally, we believe SAIS, as a novel bio-inspired and immune-inspired algorithm, paves the way for innovation in bio-inspired computing with the symbiotic paradigm.}, notes = {GECCO-2024 Student Workshop A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, ) @inproceedings(tonelli:2024:GECCOcomp, author = {Teresa Tonelli and Gloria Pietropolli and Gabriele Sbaiz and Luca Manzoni}, title = {Genetic Programming for the Reconstruction of Delay Differential Equations in Economics}, booktitle = {GECCO Student Workshop}, year = {2024}, editor = {Amir H Gandomi}, pages = {2119--2122}, address = {Melbourne, Australia}, series = {GECCO '24}, month = {14-18 July}, organisation = {SIGEVO}, publisher = {Association for Computing Machinery}, publisher_address = {New York, NY, USA}, note = {}, keywords = {genetic algorithms, genetic programming, symbolic regression, differential equations, delay differential equations, kalecki's business cycle model}, ISBN13 = {979-8-4007-0495-6}, url = {}, doi = {doi:10.1145/3638530.3664164}, size = {4 pages}, abstract = {In this paper, we explore the reconstruction of a delay differential equation (DDE) model from economics, Kalecki's business cycle model, with a variant of genetic programming (GP) encoding the structure of DDEs.
The results of this preliminary work show that GP can correctly reconstruct the model starting only from approximated data, indicating that the investigation of GP for the interpretable reconstruction of DDEs can be a worthwhile research direction.}, notes = {GECCO-2024 Student Workshop A Recombination of the 33rd International Conference on Genetic Algorithms (ICGA) and the 29th Annual Genetic Programming Conference (GP)}, )
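%
% Editor's illustrative sketch (not from any cited paper): the epsilon-greedy idea summarised in the leite-richardson:2024:GECCOcomp abstract, i.e. choosing the next evaluation point either from the Pareto front of the surrogate's predicted objective means (exploitation) or, with probability epsilon, from the Pareto front of its predicted uncertainties (exploration). The helper names (pareto_front_mask, epsilon_greedy_pick) and inputs (candidates, predicted_means, predicted_stds, epsilon) are assumptions made for this sketch, not the authors' code; only NumPy is used and all objectives are assumed to be minimised.
import numpy as np

def pareto_front_mask(points):
    # Boolean mask of non-dominated rows, assuming every column is to be minimised.
    n = points.shape[0]
    mask = np.ones(n, dtype=bool)
    for i in range(n):
        dominates_i = np.all(points <= points[i], axis=1) & np.any(points < points[i], axis=1)
        if np.any(dominates_i):
            mask[i] = False
    return mask

def epsilon_greedy_pick(candidates, predicted_means, predicted_stds, epsilon=0.1, rng=None):
    # candidates: (n, d) decision vectors; predicted_means/predicted_stds: (n, m) surrogate outputs.
    rng = np.random.default_rng() if rng is None else rng
    if rng.random() < epsilon:
        # Explore: high uncertainty is desirable, so negate before the minimising filter.
        front = pareto_front_mask(-predicted_stds)
    else:
        # Exploit: candidates whose predicted objective values are non-dominated.
        front = pareto_front_mask(predicted_means)
    idx = rng.choice(np.flatnonzero(front))
    return candidates[idx]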
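%
% Editor's illustrative sketch related to newbold:2024:GECCOcomp: one simple way to genetically encode a quantum circuit is as a linear list of gate tokens decoded into a Qiskit QuantumCircuit. The GENOME example and decode helper are hypothetical and are not the paper's encoding; only standard Qiskit gate methods (h, cx, t, ccx) are used.
from qiskit import QuantumCircuit

# A genome is assumed to be a list of (gate_name, qubit_indices) pairs.
GENOME = [("h", (0,)), ("cx", (0, 1)), ("t", (1,)), ("ccx", (0, 1, 2))]

def decode(genome, n_qubits):
    # Replay the gate tokens onto a fresh circuit, e.g. qc.h(0), qc.cx(0, 1), qc.ccx(0, 1, 2).
    qc = QuantumCircuit(n_qubits)
    for gate, qubits in genome:
        getattr(qc, gate)(*qubits)
    return qc

circuit = decode(GENOME, n_qubits=3)
print(circuit.draw())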
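%
% Editor's illustrative sketch related to rambaldi-migliore:2024:GECCOcomp: generating a Pareto front with NSGA-II is commonly done with an off-the-shelf library such as pymoo. The snippet runs NSGA-II on the standard ZDT1 benchmark purely as a stand-in; substituting the paper's ABM objectives (park livability versus expected new queens) is an assumption of this sketch, not something shown in the abstract.
from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.optimize import minimize
from pymoo.problems import get_problem

problem = get_problem("zdt1")            # two-objective test problem (both minimised)
algorithm = NSGA2(pop_size=50)           # population size chosen arbitrarily for the sketch
result = minimize(problem, algorithm, ("n_gen", 100), seed=1, verbose=False)

print(result.F[:5])                      # first few points of the final non-dominated front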