Commit

merging from private repo

NielsenErik committed Sep 27, 2023
1 parent e384271 commit aeb1e95
Showing 11,230 changed files with 11,716 additions and 1,753,833 deletions.
The diff you're trying to view is too large. We only load the first 3000 changed files.
2 changes: 1 addition & 1 deletion .gitignore
@@ -1 +1 @@
-src/marl-qd-code
+src/pyenv-marl-qd/*
2 changes: 2 additions & 0 deletions references/README.md
@@ -31,6 +31,8 @@ The core topics are Evolutionary Algorithms and Quality Diversity on Multi-Agent
- [Novelty Seeking Multiagent Evolutionary Reinforcement Learning](https://dl.acm.org/doi/pdf/10.1145/3583131.3590428), Ayhan Alp Aydeniz, Robert Loftin, Kagan Tumer.
- [Quality-diversity in dissimilarity spaces](https://dl.acm.org/doi/pdf/10.1145/3583131.3590409), Steve Huntsman.
- [Bayesian Quality Diversity Search with Interactive Illumination](https://dl.acm.org/doi/pdf/10.1145/3583131.3590486), Paul Kent, Juergen Branke.
- [Quality-Diversity Optimization with MAP-Elites and Surrogate Models](https://dl.acm.org/doi/pdf/10.1145/3377930.3389813), Antoine Cully, Jeff Clune, Danesh Tarapore, Jean-Baptiste Mouret.
- [Interpretable pipelines with evolutionarily optimized modules for reinforcement learning tasks with visual inputs](https://arxiv.org/pdf/2202.04943.pdf), Leonardo Lucio Custode, Giovanni Iacca.

## Environments
### Farama
Binary file added src/QD_MARL/__pycache__/agents.cpython-311.pyc
Binary file added src/QD_MARL/__pycache__/utils.cpython-311.pyc
72 changes: 72 additions & 0 deletions src/QD_MARL/agents.py
@@ -0,0 +1,72 @@
import numpy as np  # used by get_score_statistics below


class Agent:
def __init__(self, name, squad, set_, tree, manual_policy, to_optimize):
self._name = name
self._squad = squad
self._set = set_
self._tree = tree.deep_copy() if tree is not None else None
self._manual_policy = manual_policy
self._to_optimize = to_optimize
self._score = []

def get_name(self):
return self._name

def get_squad(self):
return self._squad

def get_set(self):
return self._set

def to_optimize(self):
return self._to_optimize

    def get_tree(self):
        # Return a defensive copy so callers cannot mutate the agent's tree.
        return self._tree.deep_copy() if self._tree is not None else None

def get_output(self, observation):
if self._to_optimize:
return self._tree.get_output(observation)
else:
return self._manual_policy.get_output(observation)

    def set_reward(self, reward):
        # Agents driven by a manual policy may have no tree to reward.
        if self._tree is not None:
            self._tree.set_reward(reward)
        self._score[-1] = reward

    def get_score_statistics(self, params):
        # Episode scores may be plain numbers or per-key dicts (dicts cannot
        # be compared with >), so flatten everything to scalar values first.
        score_values = [v for s in self._score
                        for v in (s.values() if isinstance(s, dict) else [s])]
        return getattr(np, params["type"])(a=score_values, **params["params"])

def new_episode(self):
self._score.append(0)

    def has_policy(self):
        return self._manual_policy is not None

def __str__(self):
return f"Name: {self._name}; Squad: {self._squad}; Set: {self._set}; Optimize: {str(self._to_optimize)}"

class CoachAgent(Agent):
    """Agent variant that will additionally be responsible for team selection."""

    def __init__(self, name, squad, set_, tree, manual_policy, to_optimize):
        super().__init__(name, squad, set_, tree, manual_policy, to_optimize)

    def select_team(self):
        # Team-selection logic is not implemented yet.
        pass
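For context, here is a minimal usage sketch of the `Agent` class above. The `DummyTree` stub and the concrete `{"type": ..., "params": ...}` values are assumptions for illustration; only the methods `Agent` actually calls are stubbed, and the repo presumably passes its evolved decision trees instead.

```python
class DummyTree:
    """Hypothetical stand-in for the evolved trees this repo optimizes."""
    def deep_copy(self):
        return self
    def get_output(self, observation):
        return 0  # always choose action 0
    def set_reward(self, reward):
        self.last_reward = reward

agent = Agent("agent_0", squad="squad_0", set_="train", tree=DummyTree(),
              manual_policy=None, to_optimize=True)

for _ in range(3):                       # three episodes
    agent.new_episode()                  # opens a new score slot
    action = agent.get_output([0.1, 0.2])
    agent.set_reward(1.0)                # overwrites this episode's score

# "type" can name any numpy reduction, e.g. mean, max, median.
print(agent.get_score_statistics({"type": "mean", "params": {}}))  # 1.0
```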
13 changes: 13 additions & 0 deletions src/QD_MARL/algorithms/__init__.py
@@ -0,0 +1,13 @@
from .common import *
from .continuous_optimization import *
from .genetic_algorithm import *
from .genetic_programming import *
from .grammatical_evolution import *
from .individuals import *
from .map_elites import *
from .map_elites_ge import *
from .map_elites_Pyribs import *
from .mapElitesCMA_pyRibs import *
from .mapElitesCMA import *
from .mapElitesCMA_pyRibs_GE import *

49 changes: 49 additions & 0 deletions src/QD_MARL/algorithms/common.py
@@ -0,0 +1,49 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
src.common
~~~~~~~~~~
This module contains common utilities for optimizers
:copyright: (c) 2021 by Leonardo Lucio Custode.
:license: MIT, see LICENSE for more details.
"""


class OptMetaClass(type):
_registry = {}

def __new__(meta, name, bases, class_dict):
cls = type.__new__(meta, name, bases, class_dict)
OptMetaClass._registry[cls.__name__] = cls
return cls

@staticmethod
def get(class_name):
"""
        Retrieves the class registered under the given name
:class_name: The name of the class
:returns: A class
"""
return OptMetaClass._registry[class_name]


class ContinuousOptimizationMetaClass(type):
_registry = {}

def __init__(cls, clsname, bases, methods):
super().__init__(clsname, bases, methods)
ContinuousOptimizationMetaClass._registry[cls.__name__] = cls

@staticmethod
def get(class_name):
"""
        Retrieves the class registered under the given name
:class_name: The name of the class
:returns: A class
"""
return ContinuousOptimizationMetaClass._registry[class_name]

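A quick sketch of how these registries are intended to be used (the `MyOptimizer` class is illustrative; in the repo the concrete optimizer classes come from the star imports in `algorithms/__init__.py`, whose definitions register them as a side effect):

```python
class MyOptimizer(metaclass=OptMetaClass):
    """Defining the class is enough: OptMetaClass.__new__ records it
    in the registry under its class name."""
    def ask(self):
        return []  # illustrative API; not prescribed by this commit

# Later, e.g. when a config file names the optimizer as a string:
opt_cls = OptMetaClass.get("MyOptimizer")
optimizer = opt_cls()
assert isinstance(optimizer, MyOptimizer)
```

Both metaclasses implement the same lookup-by-name pattern; one hooks `__new__` and the other `__init__`, but the effect, that every class definition populates `_registry`, is identical.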
