
Commit

test performance by naive nn training
ASSANDHOLE committed Oct 29, 2022
1 parent b5674d3 commit 27508c0
Showing 7 changed files with 248 additions and 47 deletions.
39 changes: 22 additions & 17 deletions DTLZ_problem/dataset.py
@@ -57,7 +57,7 @@ def __init__(self, n_var=7, n_obj=3, delta1=0, delta2=0, **kwargs):
     def _calc_pareto_front(self, ref_dirs=None):
         if ref_dirs is None:
             ref_dirs = get_ref_dirs(self.n_obj)
-        coefficient = max(0.5, 0.5*(100+self.delta1)*self.delta2)
+        coefficient = max(0.5, 0.5 * (100 + self.delta1) * self.delta2)
         return coefficient * ref_dirs

     def obj_func(self, X_, g):
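For intuition (not part of the diff): with the integer deltas drawn in create_dataset (delta1 in [0, 100), delta2 in [0, 10)), the front scale is 0.5 * (100 + delta1) * delta2, e.g. 0.5 * 150 * 5 = 375 for delta1=50, delta2=5; the max(0.5, ...) floor only binds when delta2 == 0.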
@@ -79,15 +79,15 @@ def _evaluate(self, x, out, *args, **kwargs):


 def pf_data(n_var: int, n_objective: int, delta1: int, delta2: int) -> np.ndarray:
-    problem = DTLZ1b(n_var=n_var, n_obj = n_objective, delta1=delta1, delta2=delta2) # change delta here
+    problem = DTLZ1b(n_var=n_var, n_obj=n_objective, delta1=delta1, delta2=delta2) # change delta here
     ref_dirs = get_reference_directions("das-dennis", n_objective, n_partitions=12)
     N = ref_dirs.shape[0]
     # create the algorithm object
     algorithm = NSGA3(pop_size=N, ref_dirs=ref_dirs)
     # execute the optimization
     res = minimize(problem,
-        algorithm,
-        termination=('n_gen', 100))
+                   algorithm,
+                   termination=('n_gen', 100))
     return res.X
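Note that, despite its name, pf_data returns res.X — the decision vectors of the final NSGA-III population (the Pareto set) — not objective values; get_pf below is the helper that yields points on the front in objective space.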


@@ -126,7 +126,7 @@ def create_dataset(problem_dim: Tuple[int, int], x=None, n_problem=None, spt_qry
     Tuple[
         Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray],
         Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
-    ], Tuple[float | None, float | None]]:
+    ], Tuple[float | None, float | None]]:
     """
     Parameters
     ----------
@@ -170,7 +170,7 @@ def create_dataset(problem_dim: Tuple[int, int], x=None, n_problem=None, spt_qry
         delta1 = np.random.randint(0, 100, n_problem[i])
         delta2 = np.random.randint(0, 10, n_problem[i])
         delta.append([delta1, delta2])
-
+
     if x is not None:
         assert len(x) == 4
     else:
@@ -233,7 +233,9 @@ def evaluate(x: np.ndarray, delta: Tuple[int, int],
         y /= min_max[1]
     return y

-def get_pf(n_var: int, n_objectives: int, delta: Tuple[int, int], min_max: Tuple[float | None, float | None]) -> np.ndarray:
+
+def get_pf(n_var: int, n_objectives: int, delta: Tuple[int, int],
+           min_max: Tuple[float | None, float | None]) -> np.ndarray:
     """
     Parameters
     ----------
@@ -251,16 +253,18 @@ def get_pf(n_var: int, n_objectives: int, delta: Tuple[int, int], min_max: Tuple
     np.ndarray
         The Pareto front, shape (n_point, n_objectives)
     """
-    problem = DTLZ1b(n_var = n_var, n_obj = n_objectives, delta1=delta[0], delta2=delta[1])
+    problem = DTLZ1b(n_var=n_var, n_obj=n_objectives, delta1=delta[0], delta2=delta[1])
     ref_dirs = get_reference_directions("das-dennis", n_objectives, n_partitions=12)
     pf = problem.pareto_front(ref_dirs)
     if min_max[0] is not None:
         pf -= min_max[0]
         pf /= min_max[1]
     return pf

-def get_moea_data(n_var: int, n_objectives: int, delta: Tuple[int, int], algorithm, n_gen: int, min_max: Tuple[float | None, float | None]) -> Tuple[
-    np.ndarray, np.ndarray, np.ndarray, np.ndarray
+
+def get_moea_data(n_var: int, n_objectives: int, delta: Tuple[int, int], algorithm, n_gen: int,
+                  min_max: Tuple[float | None, float | None]) -> Tuple[
+    np.ndarray, list, list
 ]:
     """
     Parameters
@@ -280,21 +284,21 @@ def get_moea_data(n_var: int, n_objectives: int, delta: Tuple[int, int], algorithm, n_gen: int,
     Returns
     -------
-    Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
+    Tuple[np.ndarray, list, list]
         moea_pf: The Pareto front, shape (n_point, n_objectives)
         n_evals: The number of function evaluations
         igd: The IGD
     """
-    problem = DTLZ1b(n_var = n_var, n_obj = n_objectives, delta1=delta[0], delta2=delta[1]) # change delta here
+    problem = DTLZ1b(n_var=n_var, n_obj=n_objectives, delta1=delta[0], delta2=delta[1]) # change delta here
     res = minimize(problem,
-        algorithm,
-        termination=('n_gen', n_gen),
-        save_history=True,
-        verbose=False)
+                   algorithm,
+                   termination=('n_gen', n_gen),
+                   save_history=True,
+                   verbose=False)
     moea_pf = res.F

     hist = res.history
-    hist_F,n_evals = [],[]
+    hist_F, n_evals = [], []
     for algo in hist:
         n_evals.append(algo.evaluator.n_eval)
         opt = algo.opt
@@ -308,6 +312,7 @@ def get_moea_data(n_var: int, n_objectives: int, delta: Tuple[int, int], algorithm, n_gen: int,
         moea_pf /= min_max[1]
     return moea_pf, n_evals, igd

+
 def test():
     n_dim = (8, 3)
     train_set, test_set = create_dataset(n_dim, n_problem=(4, 2), spt_qry=(5, 20)) # (12, 5, 7)
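A minimal usage sketch for the refactored helpers (not from this commit; it assumes the same pymoo imports already used by pf_data, an importable DTLZ_problem package, and illustrative delta values):

    from pymoo.algorithms.moo.nsga3 import NSGA3
    from pymoo.util.ref_dirs import get_reference_directions

    from DTLZ_problem.dataset import get_pf, get_moea_data

    ref_dirs = get_reference_directions("das-dennis", 3, n_partitions=12)
    algorithm = NSGA3(pop_size=ref_dirs.shape[0], ref_dirs=ref_dirs)

    # min_max=(None, None) skips the normalization branch in both helpers
    pf = get_pf(n_var=8, n_objectives=3, delta=(50, 5), min_max=(None, None))
    moea_pf, n_evals, igd = get_moea_data(n_var=8, n_objectives=3, delta=(50, 5),
                                          algorithm=algorithm, n_gen=100,
                                          min_max=(None, None))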
1 change: 1 addition & 0 deletions examples/__init__.py
@@ -1,2 +1,3 @@
 from examples import example
 from examples import example_sinewave
+from .baseline_nn import train as train_baseline_nn
77 changes: 77 additions & 0 deletions examples/baseline_nn.py
@@ -0,0 +1,77 @@
from typing import Tuple

import numpy as np
import torch
import torch.nn as nn


class BaselineNn(nn.Module):
    def __init__(self, n_dim: Tuple[int, int]):
        super(BaselineNn, self).__init__()
        self.n_args_in, self.n_args_out = n_dim
        self.layers = nn.Sequential(
            nn.Linear(self.n_args_in, 2 * self.n_args_in),
            nn.ReLU(),
            nn.Linear(2 * self.n_args_in, 4 * self.n_args_in),
            nn.ReLU(),
            nn.Linear(4 * self.n_args_in, 4 * self.n_args_in),
            nn.ReLU(),
            nn.Linear(4 * self.n_args_in, 2 * self.n_args_in),
            nn.ReLU(),
            nn.Linear(2 * self.n_args_in, self.n_args_out),
        )

    def forward(self, x, return_tensor=False):
        if isinstance(x, np.ndarray):
            x = torch.from_numpy(x).float().to(self.args.device)
        self.to(self.args.device)
        if len(x.shape) == 1:
            x = x.reshape(1, -1)
        ret = self.layers(x)
        return ret if return_tensor else ret.detach().cpu().numpy()


class Wrapper:
    def __init__(self, shape):
        self.shape = shape
        self.models = [BaselineNn((shape[0], 1)) for _ in range(shape[1])]

    def __call__(self, x, *args, **kwargs):
        if isinstance(x, np.ndarray):
            x = torch.from_numpy(x).float().to(self.args.device)
        for m in self.models:
            m.__dict__['args'] = self.args
        ys = []
        for i in range(self.shape[1]):
            ys.append(self.models[i](x).flatten()[0])
        ret = np.array(ys)
        if 'return_tensor' in kwargs and kwargs['return_tensor']:
            return torch.from_numpy(ret).float().to(self.args.device)
        return ret


__model = None


def train(x: torch.Tensor, y: torch.Tensor, shape: Tuple[int, int],
          init: bool = False, lr: float = 0.001, n_epochs: int = 1000):
    global __model
    if init:
        __model = Wrapper(shape)
        return __model
    for i, s in enumerate(__model.models):
        optimizer = torch.optim.Adam(s.parameters(), lr=lr)
        loss_fn = torch.nn.MSELoss()
        y_tensor = torch.from_numpy(y[i]).float().to(s.args.device)
        loss_list = []
        for _ in range(n_epochs):
            optimizer.zero_grad()
            y_pred = s(x, return_tensor=True)
            loss = loss_fn(y_pred, y_tensor)
            loss_list.append(loss.item())
            loss.backward()
            optimizer.step()

        print(f'Loss: {loss_list[-1]}, avg loss: {np.mean(loss_list)}+-{np.std(loss_list)}')

    return __model
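Nothing in the commit shows train()'s call site, and both BaselineNn.forward and Wrapper.__call__ read self.args.device even though neither class sets args in __init__ — the caller is evidently expected to attach an args object (with a .device attribute) after construction. A hypothetical sketch of that contract (Args and the random data are illustrative only):

    import numpy as np
    import torch

    from examples.baseline_nn import train

    class Args:  # stand-in for the project's args object; only .device is read here
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    shape = (8, 3)  # (n_inputs, n_objectives): one BaselineNn per objective
    model = train(None, None, shape, init=True)  # init=True only builds the Wrapper
    model.args = Args()
    for m in model.models:
        m.args = Args()  # train() reads s.args.device before any forward pass

    x = np.random.rand(100, shape[0]).astype(np.float32)
    y = [np.random.rand(100, 1).astype(np.float32) for _ in range(shape[1])]
    model = train(x, y, shape, n_epochs=100)  # one Adam + MSE loop per objective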
19 changes: 14 additions & 5 deletions examples/example.py
@@ -12,14 +12,14 @@
 def get_args():
     args = NamedDict()
     args.problem_dim = (8, 3)
-    args.train_test = (20, 1)
+    args.train_test = (15, 1)
     args.epoch = 50
     args.update_lr = 0.01
     args.meta_lr = 0.001
-    args.k_spt = 10
-    args.k_qry = 100
-    args.update_step = 5
-    args.update_step_test = 8
+    args.k_spt = 100
+    args.k_qry = 200
+    args.update_step = 30
+    args.update_step_test = 50
     args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
     # args.device = torch.device('cpu')
     return args
@@ -37,6 +37,15 @@ def get_network_structure(args):
         ('linear', [2 * n_args, 4 * n_args]),
         ('relu', [True]),
         ('linear', [1, 2 * n_args]),
+        # ('linear', [100, n_args]),
+        # ('relu', [True]),
+        # ('linear', [200, 100]),
+        # ('relu', [True]),
+        # ('linear', [200, 200]),
+        # ('relu', [True]),
+        # ('linear', [100, 200]),
+        # ('relu', [True]),
+        # ('linear', [1, 100]),
     ]
     return config
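The config entries appear to follow an ('op', [out_features, in_features]) convention — note the output layer ('linear', [1, 2 * n_args]) maps 2 * n_args inputs to a single output. The meta-learner presumably interprets this list itself; purely to illustrate the format, a hypothetical builder:

    import torch.nn as nn

    def build_from_config(config):
        # Illustrative only: materialize ('op', params) pairs as torch modules.
        layers = []
        for name, params in config:
            if name == 'linear':
                out_features, in_features = params
                layers.append(nn.Linear(in_features, out_features))
            elif name == 'relu':
                layers.append(nn.ReLU(inplace=params[0]))
            else:
                raise ValueError(f'unknown op: {name}')
        return nn.Sequential(*layers)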

(Diffs for the remaining 3 changed files are not shown.)
