Emergent Tangled Program Graphs in Multi-Task Learning
@InProceedings{ijcai2018p740,
  author    = "Stephen Kelly and Malcolm Heywood",
  title     = "Emergent Tangled Program Graphs in Multi-Task Learning",
  booktitle = "Proceedings of the Twenty-Seventh International Joint
               Conference on Artificial Intelligence, IJCAI-18",
  year      = "2018",
  pages     = "5294--5298",
  publisher = "AAAI",
  keywords  = "genetic algorithms, genetic programming, TPG, Machine
               Learning, Reinforcement Learning, Transfer, Adaptation,
               Multi-task Learning, Multidisciplinary Topics and
               Applications, Computer Games",
  URL       = "https://www.ijcai.org/proceedings/2018/0740.pdf",
  DOI       = "10.24963/ijcai.2018/740",
  size      = "5 pages",
  abstract  = "We propose a Genetic Programming (GP) framework to
               address high-dimensional Multi-Task Reinforcement
               Learning (MTRL) through emergent modularity. A
               bottom-up process is assumed in which multiple programs
               self-organize into collective decision-making entities,
               or teams, which then further develop into multi-team
               policy graphs, or Tangled Program Graphs (TPG). The
               framework learns to play three Atari video games
               simultaneously, producing a single control policy that
               matches or exceeds leading results from (game-specific)
               deep reinforcement learning in each game. More
               importantly, unlike the representation assumed for deep
               learning, TPG policies start simple and adaptively
               complexify through interaction with the task
               environment, resulting in agents that are exceedingly
               simple, operating in real-time without specialised
               hardware support such as GPUs.",
  notes     = "Replaced by \cite{Kelly:2021:TELO}",
}
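
The abstract describes policies built from teams of bidding programs whose winning action is either atomic or a pointer to another team, forming a graph that is traversed at decision time. The following is a minimal, hypothetical Python sketch of that traversal idea only; the class names, the bid representation, and the cycle-avoidance detail are illustrative assumptions, not the authors' implementation (which evolves linear GP programs).

    from dataclasses import dataclass, field
    from typing import Callable, List, Union

    @dataclass
    class Program:
        bid: Callable[[list], float]    # scores the current observation
        action: Union[int, "Team"]      # atomic action or pointer to a team

    @dataclass
    class Team:
        programs: List[Program] = field(default_factory=list)

    def act(team: Team, obs: list, visited=None) -> int:
        """Traverse the policy graph: the highest-bidding program wins;
        follow team pointers until an atomic action is reached."""
        visited = visited or set()
        visited.add(id(team))
        # Skip programs pointing at already-visited teams to avoid cycles
        # (by construction, each team retains at least one atomic action).
        candidates = [p for p in team.programs
                      if not (isinstance(p.action, Team)
                              and id(p.action) in visited)]
        winner = max(candidates, key=lambda p: p.bid(obs))
        if isinstance(winner.action, Team):
            return act(winner.action, obs, visited)
        return winner.action

    # Example: a root team deferring to a leaf team.
    leaf = Team([Program(bid=lambda o: o[0], action=3),
                 Program(bid=lambda o: 1.0, action=7)])
    root = Team([Program(bid=lambda o: 0.5, action=leaf),
                 Program(bid=lambda o: o[1], action=0)])
    print(act(root, [2.0, 0.1]))  # root's winner points to leaf, which picks 3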