Skip to content

Commit

Permalink
[Fix] restore 'by_epoch' for SchedulerList and fix EPNN (#777)
Browse files Browse the repository at this point in the history
* restore 'by_epoch' for SchedulerList

* fix for epnn
  • Loading branch information
HydrogenSulfate authored Feb 4, 2024
1 parent 988fd33 commit eaff0fc
Show file tree
Hide file tree
Showing 3 changed files with 19 additions and 6 deletions.
19 changes: 14 additions & 5 deletions examples/epnn/functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -226,6 +226,11 @@ def __init__(self, data_state, data_stress, itrain):
self.data_stress = data_stress
self.itrain = itrain

def _cvt_to_ndarray(self, list_dict):
for key in list_dict:
list_dict[key] = np.asarray(list_dict[key])
return list_dict

def get(self, epochs=1):
# Slow if using BatchSampler to obtain data
input_dict_train = {
Expand All @@ -243,7 +248,7 @@ def get(self, epochs=1):
label_dict_train = {"dummy_loss": []}
label_dict_val = {"dummy_loss": []}
for i in range(epochs):
shuffled_indices = paddle.randperm(n=self.data_state.x_train.shape[0])
shuffled_indices = np.random.permutation(self.data_state.x_train.shape[0])
input_dict_train["state_x"].append(
self.data_state.x_train[shuffled_indices[0 : self.itrain]]
)
Expand All @@ -256,9 +261,9 @@ def get(self, epochs=1):
input_dict_train["stress_y"].append(
self.data_stress.y_train[shuffled_indices[0 : self.itrain]]
)
label_dict_train["dummy_loss"].append(paddle.to_tensor(0.0))
label_dict_train["dummy_loss"].append(0.0)

shuffled_indices = paddle.randperm(n=self.data_state.x_valid.shape[0])
shuffled_indices = np.random.permutation(self.data_state.x_valid.shape[0])
input_dict_val["state_x"].append(
self.data_state.x_valid[shuffled_indices[0 : self.itrain]]
)
Expand All @@ -271,7 +276,11 @@ def get(self, epochs=1):
input_dict_val["stress_y"].append(
self.data_stress.y_valid[shuffled_indices[0 : self.itrain]]
)
label_dict_val["dummy_loss"].append(paddle.to_tensor(0.0))
label_dict_val["dummy_loss"].append(0.0)
input_dict_train = self._cvt_to_ndarray(input_dict_train)
label_dict_train = self._cvt_to_ndarray(label_dict_train)
input_dict_val = self._cvt_to_ndarray(input_dict_val)
label_dict_val = self._cvt_to_ndarray(label_dict_val)
return input_dict_train, label_dict_train, input_dict_val, label_dict_val


Expand All @@ -287,7 +296,7 @@ def __init__(self, dataset_path, train_p=0.6, cross_valid_p=0.2, test_p=0.2):
def get_shuffled_data(self):
# Need to set the seed, otherwise the loss will not match the precision
ppsci.utils.misc.set_random_seed(seed=10)
shuffled_indices = paddle.randperm(n=self.x.shape[0])
shuffled_indices = np.random.permutation(self.x.shape[0])
n_train = math.floor(self.train_p * self.x.shape[0])
n_cross_valid = math.floor(self.cross_valid_p * self.x.shape[0])
n_test = math.floor(self.test_p * self.x.shape[0])
Expand Down
5 changes: 4 additions & 1 deletion ppsci/data/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,10 @@ def build_dataloader(_dataset, cfg):
logger.warning(
"`batch_size` is set to 1 as neither sampler config nor batch_size is set."
)
batch_sampler = None
batch_sampler = io.BatchSampler(
_dataset,
batch_size=cfg["batch_size"],
)

# build collate_fn if specified
batch_transforms_cfg = cfg.pop("batch_transforms", None)
Expand Down
1 change: 1 addition & 0 deletions ppsci/optimizer/lr_scheduler.py
Original file line number Diff line number Diff line change
Expand Up @@ -750,6 +750,7 @@ class SchedulerList:
def __init__(self, scheduler_list: Tuple[lr.LRScheduler, ...]):
    """Wrap several learning-rate schedulers so they can be stepped as one.

    Args:
        scheduler_list: Tuple of schedulers to advance together.
    """
    super().__init__()
    # NOTE(review): flag is read by external consumers; False appears to
    # mean "step per iteration, not per epoch" — confirm against caller.
    self.by_epoch = False
    self._sch_list = scheduler_list

def step(self):
for sch in self._sch_list:
Expand Down

0 comments on commit eaff0fc

Please sign in to comment.