Skip to content

Commit

Permalink
Merge pull request #7 from ASSANDHOLE/agy_refine
Browse files Browse the repository at this point in the history
  • Loading branch information
ASSANDHOLE authored Dec 8, 2022
2 parents ca842c9 + fcdcfda commit c6ed994
Show file tree
Hide file tree
Showing 21 changed files with 1,229 additions and 397 deletions.
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -3,3 +3,7 @@ __pycache__/
*.py[cod]
.cache
*$py.class
*.pkl
temp*.py
nas/config.json
record.txt
2 changes: 1 addition & 1 deletion DTLZ_problem/__init__.py
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
from .dataset import create_dataset, evaluate, get_pf, get_moea_data
from .problem import DTLZ1b, DTLZ4c, DTLZbProblem, get_problem
from .problem import DTLZbProblem, get_custom_problem
121 changes: 85 additions & 36 deletions DTLZ_problem/dataset.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,22 @@
from __future__ import annotations

from typing import Tuple, List, Any
from typing import Tuple, List, Any, Literal
from multiprocessing import Pool

import numpy as np
from pymoo.util.ref_dirs import get_reference_directions
from pymoo.optimize import minimize
from pymoo.indicators.igd import IGD
from pymoo.algorithms.moo.nsga3 import NSGA3
from .problem import get_problem
try:
from .problem import get_custom_problem
except ImportError:
# for testing
from problem import get_custom_problem


def pf_data(n_var: int, n_objective: int, delta1: int, delta2: int, problem_name: str) -> np.ndarray:
problem = get_problem(name=problem_name, n_var=n_var, n_obj=n_objective, delta1=delta1, delta2=delta2) # change delta here
problem = get_custom_problem(name=problem_name, n_var=n_var, n_obj=n_objective, delta1=delta1, delta2=delta2) # change delta here
ref_dirs = get_reference_directions("das-dennis", n_objective, n_partitions=12)
N = ref_dirs.shape[0]
# create the algorithm object
Expand All @@ -23,7 +28,7 @@ def pf_data(n_var: int, n_objective: int, delta1: int, delta2: int, problem_name
return res.X


def create_dataset_inner(x, n_dim: Tuple[int, int], delta: Tuple[List[int], List[int]], problem_name: str) -> Tuple[
def create_dataset_inner_0d(x, n_dim: Tuple[int, int], delta: Tuple[List[int | float], List[int | float]], problem_name: List[str]) -> Tuple[
np.ndarray, np.ndarray
]:
"""
Expand All @@ -33,9 +38,9 @@ def create_dataset_inner(x, n_dim: Tuple[int, int], delta: Tuple[List[int], List
The input data, shape (n_problem, n_spt, n_variables)
n_dim : Tuple[int, int]
The number of variables and number of objectives
delta : Tuple[List[int], List[int]]
delta : Tuple[List[int | float], List[int | float]]
The delta1 and delta2 for each problem
problem_name : str
problem_name : List[str]
The problem name
Returns
Expand All @@ -48,15 +53,39 @@ def create_dataset_inner(x, n_dim: Tuple[int, int], delta: Tuple[List[int], List
n_var, n_obj = n_dim
y = []
for i in range(n_problem):
problem = get_problem(name=problem_name, n_var=n_var, n_obj=n_obj, delta1=delta1[i], delta2=delta2[i])
problem = get_custom_problem(name=problem_name[i%len(problem_name)], n_var=n_var, n_obj=n_obj, delta1=delta1[i], delta2=delta2[i])
y.extend([*problem.evaluate(x[i]).transpose()])
y = np.array(y).astype(np.float32)
new_x = np.repeat(x, n_obj, axis=0).astype(np.float32)
return new_x, y


def create_dataset(problem_dim: Tuple[int, int], problem_name: str, x=None, n_problem=None, spt_qry=None, delta=None,
normalize_targets=True, **_) -> Tuple[
def create_dataset_inner_1d(x, n_dim: Tuple[int, int], delta: Tuple[List[int | float], List[int | float]], problem_name: List[str]) -> Tuple[
    np.ndarray, np.ndarray
]:
    """
    Evaluate each problem on its own sample points, keeping the objective
    dimension intact (unlike the 0d variant, which flattens per objective).

    Parameters
    ----------
    x : array-like
        The input data, shape (n_problem, n_point, n_variables)
    n_dim : Tuple[int, int]
        The number of variables and number of objectives
    delta : Tuple[List[int | float], List[int | float]]
        The delta1 and delta2 for each problem
    problem_name : List[str]
        The problem names; cycled over when fewer names than problems are given

    Returns
    -------
    Tuple[np.ndarray, np.ndarray]
        x cast to float32, and y as float32 — presumably
        (n_problem, n_point, n_objectives); shape depends on
        problem.evaluate's return (TODO confirm against DTLZc)
    """
    delta1, delta2 = delta
    n_problem = len(delta1)
    n_var, n_obj = n_dim
    y = []
    for i in range(n_problem):
        # Cycle through the provided names so n_problem may exceed len(problem_name).
        problem = get_custom_problem(name=problem_name[i % len(problem_name)],
                                     n_var=n_var, n_obj=n_obj,
                                     delta1=delta1[i], delta2=delta2[i])
        # append(v) replaces the original extend([v]) — identical behavior, clearer intent
        y.append(problem.evaluate(x[i]))
    y = np.array(y).astype(np.float32)
    x = np.array(x).astype(np.float32)
    return x, y


def create_dataset(problem_dim: Tuple[int, int],
problem_name: List[str],
test_problem_name: List[str],
x=None,
n_problem=None,
spt_qry=None,
delta=None,
normalize_targets=True,
dim: Literal[0, 1] = 0,
pf_ratio: float = 0.5,
**_) -> Tuple[
Tuple[
Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray],
Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
Expand All @@ -66,8 +95,8 @@ def create_dataset(problem_dim: Tuple[int, int], problem_name: str, x=None, n_pr
----------
problem_dim : Tuple[int, int]
The number of variables and number of objectives for each problem
problem_name : str
The problem name
problem_name : List[str]
The problem names
x : Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
[x_spt_train, x_qry_train, x_spt_test, x_qry_test]
The input data, shape (4, n_problem, n_spt, n_variables)
Expand All @@ -79,6 +108,10 @@ def create_dataset(problem_dim: Tuple[int, int], problem_name: str, x=None, n_pr
The number of support and query points for each problem
normalize_targets : bool
Whether to normalize the targets
dim : int
The dimension of the problem
pf_ratio : float
The ratio of the Pareto front to the whole dataset
Returns
-------
Expand All @@ -95,14 +128,19 @@ def create_dataset(problem_dim: Tuple[int, int], problem_name: str, x=None, n_pr
In the second Tuple:
The minimum and maximum of the targets (to be used for normalization)
"""
def generate_x(_i, _j):
create_dataset_inner = create_dataset_inner_0d if dim == 0 else create_dataset_inner_1d

def generate_x(_i, _j, ratio_of_pf: float = 0.5):
x_pf = []
for k in range(n_problem[_i]):
ps = pf_data(n_var, n_obj, delta[_i][_j][k], delta[_i][_j][k], problem_name)
x_pf.append(ps[np.random.choice(ps.shape[0], int(0.5 * spt_qry[_j]))])
x_pf = np.array(x_pf)
x_ran = np.random.rand(n_problem[_i], spt_qry[_j] - int(0.5 * spt_qry[_j]), n_var)
return np.concatenate((x_pf, x_ran), axis=1)
pf_num = int(ratio_of_pf * spt_qry[_j])
pf_num = pf_num if pf_num > 0 else 0
if pf_num > 0:
for k in range(n_problem[_i]):
ps = pf_data(n_var, n_obj, delta[_i][_j][k], delta[_i][_j][k], problem_name)
x_pf.append(ps[np.random.choice(ps.shape[0], pf_num)])
x_pf = np.array(x_pf)
x_ran = np.random.rand(n_problem[_i], spt_qry[_j] - pf_num, n_var)
return np.concatenate((x_pf, x_ran), axis=1) if pf_num > 0 else x_ran

n_var, n_obj = problem_dim

Expand All @@ -122,11 +160,11 @@ def generate_x(_i, _j):
for i in range(2):
for j in range(2):
if x[i * 2 + j] is None:
x[i * 2 + j] = generate_x(i, j)
x[i * 2 + j] = generate_x(i, j, pf_ratio)
# x.append(np.random.rand(n_problem[i], spt_qry[j], n_var))

train_set = [*create_dataset_inner(x[0], problem_dim, delta[0], problem_name), *create_dataset_inner(x[1], problem_dim, delta[0], problem_name)]
test_set = [*create_dataset_inner(x[2], problem_dim, delta[1], problem_name), *create_dataset_inner(x[3], problem_dim, delta[1], problem_name)]
test_set = [*create_dataset_inner(x[2], problem_dim, delta[1], test_problem_name), *create_dataset_inner(x[3], problem_dim, delta[1], test_problem_name)]
if normalize_targets:
minimum = np.min(np.concatenate([train_set[1], test_set[1]]), axis=None)
train_set[1] -= minimum
Expand Down Expand Up @@ -166,7 +204,7 @@ def evaluate(x: np.ndarray, delta: Tuple[int, int], n_objectives: int, problem_n
The output data, shape (n_point, n_objectives)
"""
n_variables = x.shape[1]
problem = get_problem(name=problem_name, n_var=n_variables, n_obj=n_objectives, delta1=delta[0], delta2=delta[1])
problem = get_custom_problem(name=problem_name, n_var=n_variables, n_obj=n_objectives, delta1=delta[0], delta2=delta[1])
y = problem.evaluate(x)
if min_max[0] is not None:
y -= min_max[0]
Expand Down Expand Up @@ -202,8 +240,8 @@ def evaluate(x: np.ndarray, delta: Tuple[int, int], n_objectives: int, problem_n
# return pf


def get_pf(n_var: int, n_objectives: int, delta: Tuple[int, int], problem_name: str,
min_max: Tuple[float | None, float | None]) -> np.ndarray:
def get_pf(n_objectives: int, problem: Any,
min_max: Tuple[float | None, float | None] | None = None) -> np.ndarray:
"""
Parameters
----------
Expand All @@ -221,9 +259,10 @@ def get_pf(n_var: int, n_objectives: int, delta: Tuple[int, int], problem_name:
Returns
-------
np.ndarray
The parato front, shape (n_point, n_objectives)
The Pareto front, shape (n_point, n_objectives)
"""
problem = get_problem(name=problem_name, n_var=n_var, n_obj=n_objectives, delta1=delta[0], delta2=delta[1]) # change delta here
# change delta here
# problem = get_problem(name=problem_name, n_var=n_var, n_obj=n_objectives, delta1=delta[0], delta2=delta[1])
ref_dirs = get_reference_directions("das-dennis", n_objectives, n_partitions=12)
N = ref_dirs.shape[0]
# create the algorithm object
Expand All @@ -233,13 +272,18 @@ def get_pf(n_var: int, n_objectives: int, delta: Tuple[int, int], problem_name:
algorithm,
termination=('n_gen', 100))
pf = res.F
if min_max[0] is not None:
if min_max is not None and min_max[0] is not None:
pf -= min_max[0]
pf /= min_max[1]
return pf


def get_moea_data(n_var: int, n_objectives: int, delta: Tuple[int, int], algorithm: Any, n_gen: int, metric: Any, problem_name: str,
def get_moea_data(n_var: int,
n_objectives: int,
delta: Tuple[int, int],
algorithm: Any,
n_eval: int,
metric: Any, problem_name: str,
min_max: Tuple[float | None, float | None]) -> Tuple[
np.ndarray, list, list
]:
Expand All @@ -254,8 +298,8 @@ def get_moea_data(n_var: int, n_objectives: int, delta: Tuple[int, int], algorit
The delta1 and delta2
algorithm:
MOEA algorithm
n_gen: int
number of generation
n_eval: int
number of function evaluations
metric:
The metric to calculate the IGD
problem_name : str
Expand All @@ -270,10 +314,10 @@ def get_moea_data(n_var: int, n_objectives: int, delta: Tuple[int, int], algorit
n_evals: The number of function evaluations
igd: The IGD
"""
problem = get_problem(name=problem_name, n_var=n_var, n_obj=n_objectives, delta1=delta[0], delta2=delta[1]) # change delta here
problem = get_custom_problem(name=problem_name, n_var=n_var, n_obj=n_objectives, delta1=delta[0], delta2=delta[1]) # change delta here
res = minimize(problem,
algorithm,
termination=('n_gen', n_gen),
termination=('n_eval', n_eval),
save_history=True,
verbose=False)
moea_pf = res.F
Expand All @@ -283,8 +327,13 @@ def get_moea_data(n_var: int, n_objectives: int, delta: Tuple[int, int], algorit
for algo in hist:
n_evals.append(algo.evaluator.n_eval)
opt = algo.opt
feas = np.where(opt.get("feasible"))[0]
hist_F.append(opt.get("F")[feas])
# feas = np.where(opt.get("feasible"))[0]
# hist_F.append(opt.get("F")[feas])
feas_pop = np.where(algo.pop.get("feasible"))[0]
feas_off = np.where(algo.off.get("feasible"))[0]
hist_F.append(np.concatenate([algo.pop.get("F")[feas_pop], algo.off.get("F")[feas_off]], axis=0))
if len(hist_F) > 1:
hist_F[-1] = np.unique(np.concatenate([hist_F[-2], hist_F[-1]], axis=0), axis=0)
if min_max[0] is not None:
for _F in hist_F:
_F -= min_max[0]
Expand All @@ -298,10 +347,10 @@ def get_moea_data(n_var: int, n_objectives: int, delta: Tuple[int, int], algorit


def test():
    """Smoke test: build a small dataset and print the four split shapes."""
    n_dim = (10, 3)
    # create_dataset's new signature requires `test_problem_name` (it has no
    # default) and indexes problem_name cyclically, so both must be lists —
    # passing the bare string 'DTLZ1b' and omitting test_problem_name raises.
    train_set, test_set = create_dataset(problem_dim=n_dim,
                                         problem_name=['DTLZ1b'],
                                         test_problem_name=['DTLZ1b'],
                                         n_problem=(4, 2),
                                         spt_qry=(5, 20),
                                         dim=1)  # (12, 5, 7)
    print(train_set[0].shape, train_set[1].shape, test_set[0].shape, test_set[1].shape)


if __name__ == '__main__':
    test()
    # NOTE(review): get_pf forwards `problem` straight into pymoo's minimize;
    # calling it with problem=None looks like it would fail inside pymoo —
    # confirm this call is intentional.
    get_pf(n_objectives=3, problem=None)
70 changes: 61 additions & 9 deletions DTLZ_problem/problem.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import numpy as np
from pymoo.core.problem import Problem
from pymoo.problems.many.dtlz import get_ref_dirs
from pymoo.util.remote import Remote

from maml_mod import MamlWrapper

Expand Down Expand Up @@ -86,8 +87,11 @@ def __init__(self, n_var, n_obj, delta1, delta2, k=None):
raise Exception("Either provide number of variables or k!")

super().__init__(n_var=n_var, n_obj=n_obj, n_constr=0, xl=0, xu=1, type_var=np.double)

def g1(self, X_M):
return 100 * (self.k + np.sum(np.square(X_M - 0.5) - np.cos(20 * np.pi * (X_M - 0.5)), axis=1))

def g2(self, X_M):
return np.sum(np.square(X_M - 0.5), axis=1)

def obj_func(self, X_, g, alpha=1):
Expand All @@ -105,23 +109,69 @@ def obj_func(self, X_, g, alpha=1):
f = np.column_stack(f)
return f


class DTLZ2c(DTLZc):
    """DTLZ2-style problem with delta1/delta2 perturbations (handled by the
    DTLZc base); uses the sphere-like g2 distance function."""

    def __init__(self, n_var=10, n_obj=3, delta1=0, delta2=0, **kwargs):
        super().__init__(n_var=n_var, n_obj=n_obj, delta1=delta1, delta2=delta2, **kwargs)

    def _calc_pareto_front(self, ref_dirs=None):
        # No analytic front is provided for the perturbed variant.
        pass

    def _evaluate(self, x, out, *args, **kwargs):
        # Split decision vector: first n_obj-1 columns are position variables,
        # the remainder are distance variables fed to g2.
        position = x[:, :self.n_obj - 1]
        distance = x[:, self.n_obj - 1:]
        out["F"] = self.obj_func(position, self.g2(distance), alpha=1)


class DTLZ3c(DTLZc):
    """DTLZ3-style problem with delta1/delta2 perturbations (handled by the
    DTLZc base); uses the multimodal g1 distance function."""

    def __init__(self, n_var=10, n_obj=3, delta1=0, delta2=0, **kwargs):
        super().__init__(n_var=n_var, n_obj=n_obj, delta1=delta1, delta2=delta2, **kwargs)

    def _calc_pareto_front(self, ref_dirs=None):
        # No analytic front is provided for the perturbed variant.
        pass

    def _evaluate(self, x, out, *args, **kwargs):
        # Split decision vector: first n_obj-1 columns are position variables,
        # the remainder are distance variables fed to g1.
        position = x[:, :self.n_obj - 1]
        distance = x[:, self.n_obj - 1:]
        out["F"] = self.obj_func(position, self.g1(distance), alpha=1)


class DTLZ4c(DTLZc):
    """DTLZ4-style problem with delta1/delta2 perturbations (handled by the
    DTLZc base).

    alpha biases the density of solutions along the front (DTLZ4's defining
    feature); d is stored but not read in this class — presumably kept for
    interface compatibility (TODO confirm).
    """

    def __init__(self, n_var=10, n_obj=3, alpha=100, d=100, delta1=0, delta2=0, **kwargs):
        super().__init__(n_var=n_var, n_obj=n_obj, delta1=delta1, delta2=delta2, **kwargs)
        self.alpha = alpha
        self.d = d

    def _calc_pareto_front(self, ref_dirs=None):
        # Post-commit version drops the analytic-front computation; no front
        # is provided for the perturbed variant.
        pass

    def _evaluate(self, x, out, *args, **kwargs):
        X_, X_M = x[:, :self.n_obj - 1], x[:, self.n_obj - 1:]
        # Post-commit version switches from g1 to the sphere-like g2.
        g = self.g2(X_M)
        out["F"] = self.obj_func(X_, g, alpha=self.alpha)


class DTLZ7c(DTLZc):
    """DTLZ7-style problem where delta1 shifts the g distance term and
    delta2 shifts the last objective."""

    def __init__(self, n_var=10, n_obj=3, delta1=0, delta2=0, **kwargs):
        # Stored locally as well, since _evaluate reads them from self.
        self.delta1 = delta1
        self.delta2 = delta2
        super().__init__(n_var=n_var, n_obj=n_obj, delta1=delta1, delta2=delta2, **kwargs)

    def _calc_pareto_front(self):
        # No analytic front is provided for the perturbed variant.
        pass

    def _evaluate(self, x, out, *args, **kwargs):
        # The first n_obj-1 objectives are the decision variables themselves.
        f = np.column_stack([x[:, j] for j in range(self.n_obj - 1)])

        # Distance term over the last k variables, shifted by delta1.
        g = 1 + 9 / self.k * np.sum(x[:, -self.k:], axis=1) + self.delta1
        # DTLZ7-style h function coupling f and g.
        h = self.n_obj - np.sum(f / (1 + g[:, None]) * (1 + np.sin(3 * np.pi * f)), axis=1)

        out["F"] = np.column_stack([f, (1 + g) * h + self.delta2])


class DTLZbProblem(Problem):
def __init__(self, n_var: int, n_obj: int, sol: MamlWrapper):
self.sol = sol
Expand All @@ -141,14 +191,16 @@ def _evaluate(self, x, out, *args, **kwargs):
out["F"] = np.array(f)


def get_custom_problem(name, *args, **kwargs):
    """Look up a custom DTLZ problem class by name and instantiate it.

    All positional and keyword arguments are forwarded to the problem
    constructor (n_var, n_obj, delta1, delta2, ...).

    Raises
    ------
    Exception
        If `name` is not a known problem.
    """
    problems = {
        "DTLZ1b": DTLZ1b,
        "DTLZ2c": DTLZ2c,
        "DTLZ3c": DTLZ3c,
        "DTLZ4c": DTLZ4c,
        "DTLZ7c": DTLZ7c,
    }

    if name not in problems:
        raise Exception(f"Problem: {name} not found.")

    return problems[name](*args, **kwargs)
2 changes: 2 additions & 0 deletions benchmarking/__init__.py
Original file line number Diff line number Diff line change
@@ -1 +1,3 @@
from .benchmark import benchmark_for_seeds
from .benchmark import benchmark_for_seeds_different_args
from .gpu_selector import GpuManager
Loading

0 comments on commit c6ed994

Please sign in to comment.