A Comparison and Analysis of LLM Agents within the Context of An LLM-Guided Evolution for Object Detection
Created by W.Langdon from gp-bibliography.bib Revision:1.8519
@InProceedings{yu:2025:GECCOcomp,
  author =       "YiMing Yu and Jason Zutty",
  title =        "A Comparison and Analysis of {LLM} Agents within the
                  Context of An {LLM-Guided} Evolution for Object
                  Detection",
  booktitle =    "Proceedings of the 2025 Genetic and Evolutionary
                  Computation Conference Companion",
  year =         "2025",
  editor =       "Ryan Urbanowicz and Will N. Browne",
  pages =        "327--330",
  address =      "Malaga, Spain",
  series =       "GECCO '25 Companion",
  month =        "14-18 " # jul,
  organisation = "SIGEVO",
  publisher =    "Association for Computing Machinery",
  publisher_address = "New York, NY, USA",
  keywords =     "genetic algorithms, genetic programming, grammatical
                  evolution, computer aided/automated design, automated
                  machine learning, large language models,
                  neuroevolution, Evolutionary Machine Learning: Poster",
  isbn13 =       "979-8-4007-1464-1",
  URL =          "https://doi.org/10.1145/3712255.3726669",
  DOI =          "doi:10.1145/3712255.3726669",
  size =         "4 pages",
abstract = "In machine learning, evolutionary algorithms have
traditionally depended on fixed rules and predefined
building blocks. The Guided Evolution (GE) framework
transforms this approach by incorporating Large
Language Models (LLMs) to directly modify code and
intelligently guide both mutations and crossovers. A
key element of GE is the {"}Evolution of Thought{"}
(EoT) technique, which establishes positive feedback
loops, allowing LLMs to refine their decisions
iteratively based on successful results. Building on
this concept, we introduced the LLM-guided evolution to
modify the architecture of a You Only Look Once (YOLO)
model and enhance its performance on the KITTI dataset.
Our findings show that LLM-Guided Evolution produced
variants with significant performance improvements,
such as an increase in Mean Average Precision from
92.5\% to 94.7\%. we also evaluated various LLM-GEs
with different LLMs, including Mixtral-8x7B,
Llama-3.1-70B, and Llama-3.3-70B, to assess their
impacts on the evolutionary process. Our analysis shows
that GE with Llama-3.1 and Llama-3.3 surpassed
Mixtral-8x7B in performance, underscoring the
importance of LLMs selection in refining evolutionary
algorithms. These results highlight the flexibility and
effectiveness of GE in real-world computer vision
challenges, offering a novel paradigm for autonomous
model optimization that combines LLM-driven reasoning
with evolutionary strategies.",
  notes =        "GECCO-2025 EML A Recombination of the 34th
                  International Conference on Genetic Algorithms (ICGA)
                  and the 30th Annual Genetic Programming Conference
                  (GP)",
}
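
For readers unfamiliar with the Guided Evolution loop summarized in the abstract, below is a minimal, purely illustrative Python sketch of an LLM-guided mutation loop with an Evolution-of-Thought style feedback buffer. It is not the authors' implementation: query_llm(), evaluate_map(), the prompt text, and all parameters are hypothetical placeholders standing in for the paper's actual LLM calls (Mixtral/Llama), YOLO variants, and KITTI evaluation.

    # Illustrative sketch only, not the paper's code.
    # query_llm() and evaluate_map() are hypothetical stubs.
    import random
    from dataclasses import dataclass

    @dataclass
    class Candidate:
        code: str                 # source of a model variant (e.g. a YOLO module)
        fitness: float = 0.0      # e.g. Mean Average Precision on a validation split

    def query_llm(prompt: str) -> str:
        """Hypothetical stand-in for a call to an LLM; returns modified code."""
        return prompt.splitlines()[-1] + "  # llm-edited"

    def evaluate_map(code: str) -> float:
        """Hypothetical stand-in for training/evaluating the variant on KITTI."""
        return random.random()

    def mutate(parent: Candidate, feedback: list[str]) -> Candidate:
        # "Evolution of Thought": feed summaries of previously successful edits
        # back into the prompt so the LLM can refine its decisions iteratively.
        prompt = "\n".join(
            ["Improve this detection module. Past successful edits:"]
            + feedback
            + [parent.code]
        )
        child_code = query_llm(prompt)
        return Candidate(code=child_code, fitness=evaluate_map(child_code))

    def guided_evolution(seed_code: str, generations: int = 5, pop_size: int = 4) -> Candidate:
        population = [Candidate(seed_code, evaluate_map(seed_code)) for _ in range(pop_size)]
        feedback: list[str] = []
        for _ in range(generations):
            parents = sorted(population, key=lambda c: c.fitness, reverse=True)[: pop_size // 2]
            children = [mutate(p, feedback) for p in parents]
            # Positive feedback loop: remember edits that improved fitness.
            for p, c in zip(parents, children):
                if c.fitness > p.fitness:
                    feedback.append(c.code)
            population = parents + children
        return max(population, key=lambda c: c.fitness)

    if __name__ == "__main__":
        best = guided_evolution("def detect(x): return x")
        print(f"best fitness (placeholder mAP): {best.fitness:.3f}")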