6 changes: 4 additions & 2 deletions deepmd/env.py
@@ -13,6 +13,8 @@
"global_float_prec",
]

+ log = logging.getLogger(__name__)

# FLOAT_PREC
dp_float_prec = os.environ.get("DP_INTERFACE_PREC", "high").lower()
if dp_float_prec in ("high", ""):
@@ -47,7 +49,7 @@ def set_env_if_empty(key: str, value: str, verbose: bool = True):
if os.environ.get(key) is None:
os.environ[key] = value
if verbose:
- logging.warning(
+ log.warning(
f"Environment variable {key} is empty. Use the default value {value}"
)

@@ -72,7 +74,7 @@ def set_default_nthreads():
and "TF_INTER_OP_PARALLELISM_THREADS" not in os.environ
)
):
- logging.warning(
+ log.warning(
"To get the best performance, it is recommended to adjust "
"the number of threads by setting the environment variables "
"OMP_NUM_THREADS, DP_INTRA_OP_PARALLELISM_THREADS, and "
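The pattern adopted throughout this PR is the standard per-module logger: instead of calling logging.xxx on the root logger, each module creates its own named logger once. A minimal sketch of that pattern, assuming only the standard library (warn_if_empty is an illustrative stand-in, not deepmd code):

import logging

log = logging.getLogger(__name__)  # one logger per module, named after the module


def warn_if_empty(key: str, value: str) -> None:
    # Records emitted here carry this module's name, so handlers
    # installed once on the root logger can filter or format per module.
    log.warning("Environment variable %s is empty. Use the default value %s", key, value)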
34 changes: 13 additions & 21 deletions deepmd/pt/entrypoints/main.py
@@ -30,6 +30,9 @@
from deepmd.infer.model_devi import (
make_model_devi,
)
+ from deepmd.loggers.loggers import (
+ set_log_handles,
+ )
from deepmd.main import (
parse_args,
)
@@ -42,9 +45,6 @@
from deepmd.pt.train import (
training,
)
- from deepmd.pt.utils import (
- env,
- )
from deepmd.pt.utils.dataloader import (
DpLoaderSet,
)
@@ -58,6 +58,8 @@
make_stat_input,
)

+ log = logging.getLogger(__name__)


def get_trainer(
config,
@@ -237,7 +239,7 @@ def prepare_trainer_input_single(


def train(FLAGS):
logging.info("Configuration path: %s", FLAGS.INPUT)
log.info("Configuration path: %s", FLAGS.INPUT)
with open(FLAGS.INPUT) as fin:
config = json.load(fin)
trainer = get_trainer(
@@ -278,28 +280,18 @@ def freeze(FLAGS):
)


- # avoid logger conflicts of tf version
- def clean_loggers():
- logger = logging.getLogger()
- while logger.hasHandlers():
- logger.removeHandler(logger.handlers[0])


@record
def main(args: Optional[Union[List[str], argparse.Namespace]] = None):
- clean_loggers()

if not isinstance(args, argparse.Namespace):
FLAGS = parse_args(args=args)
else:
FLAGS = args
dict_args = vars(FLAGS)

- logging.basicConfig(
- level=logging.WARNING if env.LOCAL_RANK else logging.INFO,
- format=f"%(asctime)-15s {os.environ.get('RANK') or ''} [%(filename)s:%(lineno)d] %(levelname)s %(message)s",
- )
- logging.info("DeepMD version: %s", __version__)
+ set_log_handles(FLAGS.log_level, FLAGS.log_path, mpi_log=None)
+ log.debug("Log handles were successfully set")

log.info("DeepMD version: %s", __version__)

if FLAGS.command == "train":
train(FLAGS)
@@ -315,17 +307,17 @@ def main(args: Optional[Union[List[str], argparse.Namespace]] = None):
FLAGS.model = FLAGS.checkpoint_folder
FLAGS.output = str(Path(FLAGS.output).with_suffix(".pth"))
freeze(FLAGS)
- elif args.command == "doc-train-input":
+ elif FLAGS.command == "doc-train-input":
doc_train_input(**dict_args)
- elif args.command == "model-devi":
+ elif FLAGS.command == "model-devi":
dict_args["models"] = [
str(Path(mm).with_suffix(".pt"))
if Path(mm).suffix not in (".pb", ".pt")
else mm
for mm in dict_args["models"]
]
make_model_devi(**dict_args)
- elif args.command == "gui":
+ elif FLAGS.command == "gui":
start_dpgui(**dict_args)
else:
raise RuntimeError(f"Invalid command {FLAGS.command}!")
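Here the ad-hoc clean_loggers()/logging.basicConfig setup is dropped in favor of deepmd's set_log_handles, which configures handlers once at the entry point; every module-level logger then propagates its records to them. A hedged sketch of that division of labor, where configure_logging is an illustrative stand-in for set_log_handles, not its actual implementation:

import logging
from typing import Optional


def configure_logging(level: int, log_path: Optional[str] = None) -> None:
    # Install handlers on the root logger exactly once, at the entry
    # point; module loggers created via getLogger(__name__) propagate here.
    root = logging.getLogger()
    root.setLevel(level)
    fmt = logging.Formatter("%(asctime)s [%(name)s] %(levelname)s %(message)s")
    stream = logging.StreamHandler()
    stream.setFormatter(fmt)
    root.addHandler(stream)
    if log_path is not None:
        file_handler = logging.FileHandler(log_path)
        file_handler.setFormatter(fmt)
        root.addHandler(file_handler)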
31 changes: 15 additions & 16 deletions deepmd/pt/infer/inference.py
@@ -40,6 +40,7 @@

if torch.__version__.startswith("2"):
import torch._dynamo
+ log = logging.getLogger(__name__)


class Tester:
@@ -95,9 +96,7 @@ def __init__(
), f"Validation systems not found in {input_script}!"
self.systems = training_params["validation_data"]["systems"]
self.batchsize = training_params["validation_data"]["batch_size"]
- logging.info(
- f"Testing validation systems in input script: {input_script}"
- )
+ log.info(f"Testing validation systems in input script: {input_script}")
else:
assert (
"data_dict" in training_params
@@ -115,18 +114,18 @@ def __init__(
self.batchsize = training_params["data_dict"][head]["validation_data"][
"batch_size"
]
- logging.info(
+ log.info(
f"Testing validation systems in head {head} of input script: {input_script}"
)
elif system is not None:
self.systems = expand_sys_str(system)
self.batchsize = "auto"
logging.info("Testing systems in path: %s", system)
log.info("Testing systems in path: %s", system)
elif datafile is not None:
with open(datafile) as fin:
self.systems = fin.read().splitlines()
self.batchsize = "auto"
logging.info("Testing systems in file: %s", datafile)
log.info("Testing systems in file: %s", datafile)
else:
self.systems = None
self.batchsize = None
@@ -210,8 +209,8 @@ def run(self):
system_results = {}
global_sum_natoms = 0
for cc, system in enumerate(systems):
logging.info("# ---------------output of dp test--------------- ")
logging.info(f"# testing system : {system}")
log.info("# ---------------output of dp test--------------- ")
log.info(f"# testing system : {system}")
system_pred = []
system_label = []
dataset = DpLoaderSet(
@@ -226,7 +225,7 @@ def run(self):
dataset, replacement=True, num_samples=dataset.total_batch
)
if sampler is None:
- logging.warning(
+ log.warning(
"Sampler not specified!"
) # None sampler will lead to a premature stop iteration. Replacement should be True in attribute of the sampler to produce expected number of items in one iteration.
dataloader = DataLoader(
@@ -296,8 +295,8 @@ def run(self):
for k, v in single_results.items()
}
for item in sorted(results.keys()):
logging.info(f"{item}: {results[item]:.4f}")
logging.info("# ----------------------------------------------- ")
log.info(f"{item}: {results[item]:.4f}")
log.info("# ----------------------------------------------- ")
for k, v in single_results.items():
system_results[k] = system_results.get(k, 0.0) + v
global_sum_natoms += sum_natoms
@@ -306,14 +305,14 @@ def run(self):
k: v / global_sum_natoms if "mae" in k else math.sqrt(v / global_sum_natoms)
for k, v in system_results.items()
}
logging.info("# ----------weighted average of errors----------- ")
log.info("# ----------weighted average of errors----------- ")
if not self.multi_task:
logging.info(f"# number of systems : {len(systems)}")
log.info(f"# number of systems : {len(systems)}")
else:
logging.info(f"# number of systems for {self.head}: {len(systems)}")
log.info(f"# number of systems for {self.head}: {len(systems)}")
for item in sorted(global_results.keys()):
logging.info(f"{item}: {global_results[item]:.4f}")
logging.info("# ----------------------------------------------- ")
log.info(f"{item}: {global_results[item]:.4f}")
log.info("# ----------------------------------------------- ")
return global_results


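A note on the aggregation in run() above: per-system error sums are accumulated and normalized once by the total atom count, with a final square root turning squared-error sums into an RMSE. A small self-contained illustration of that computation, with hypothetical values:

import math

# Accumulated error sums over all systems (hypothetical numbers).
system_results = {"mae_f": 12.0, "rmse_e": 0.09}
global_sum_natoms = 300

global_results = {
    k: v / global_sum_natoms if "mae" in k else math.sqrt(v / global_sum_natoms)
    for k, v in system_results.items()
}
# mae_f  -> 12.0 / 300 = 0.04          (mean absolute error per atom)
# rmse_e -> sqrt(0.09 / 300) ≈ 0.0173  (root of the mean squared error)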
10 changes: 6 additions & 4 deletions deepmd/pt/model/model/model.py
@@ -12,6 +12,8 @@
compute_output_stats,
)

+ log = logging.getLogger(__name__)


class BaseModel(torch.nn.Module):
def __init__(self):
@@ -55,7 +57,7 @@ def compute_or_load_stat(
if not os.path.exists(stat_file_dir):
os.mkdir(stat_file_dir)
if not isinstance(stat_file_path, list):
logging.info(f"Saving stat file to {stat_file_path}")
log.info(f"Saving stat file to {stat_file_path}")
np.savez_compressed(
stat_file_path,
sumr=sumr,
@@ -68,7 +70,7 @@
)
else:
for ii, file_path in enumerate(stat_file_path):
logging.info(f"Saving stat file to {file_path}")
log.info(f"Saving stat file to {file_path}")
np.savez_compressed(
file_path,
sumr=sumr[ii],
@@ -82,7 +84,7 @@
else: # load stat
target_type_map = type_map
if not isinstance(stat_file_path, list):
logging.info(f"Loading stat file from {stat_file_path}")
log.info(f"Loading stat file from {stat_file_path}")
stats = np.load(stat_file_path)
stat_type_map = list(stats["type_map"])
missing_type = [
@@ -105,7 +107,7 @@
sumr, suma, sumn, sumr2, suma2 = [], [], [], [], []
id_bias_atom_e = None
for ii, file_path in enumerate(stat_file_path):
logging.info(f"Loading stat file from {file_path}")
log.info(f"Loading stat file from {file_path}")
stats = np.load(file_path)
stat_type_map = list(stats["type_map"])
missing_type = [
4 changes: 3 additions & 1 deletion deepmd/pt/model/task/dipole.py
@@ -10,6 +10,8 @@
Fitting,
)

+ log = logging.getLogger(__name__)


class DipoleFittingNetType(Fitting):
def __init__(
@@ -37,7 +39,7 @@ def __init__(
self.filter_layers = torch.nn.ModuleList(filter_layers)

if "seed" in kwargs:
logging.info("Set seed to %d in fitting net.", kwargs["seed"])
log.info("Set seed to %d in fitting net.", kwargs["seed"])
torch.manual_seed(kwargs["seed"])

def forward(self, inputs, atype, atype_tebd, rot_mat):
6 changes: 4 additions & 2 deletions deepmd/pt/model/task/ener.py
@@ -40,6 +40,8 @@
dtype = env.GLOBAL_PT_FLOAT_PRECISION
device = env.DEVICE

+ log = logging.getLogger(__name__)


@fitting_check_output
class InvarFitting(Fitting):
@@ -153,7 +155,7 @@ def __init__(

# very bad design...
if "seed" in kwargs:
logging.info("Set seed to %d in fitting net.", kwargs["seed"])
log.info("Set seed to %d in fitting net.", kwargs["seed"])
torch.manual_seed(kwargs["seed"])

def output_def(self) -> FittingOutputDef:
@@ -451,7 +453,7 @@ def __init__(
self.filter_layers = torch.nn.ModuleList(filter_layers)

if "seed" in kwargs:
logging.info("Set seed to %d in fitting net.", kwargs["seed"])
log.info("Set seed to %d in fitting net.", kwargs["seed"])
torch.manual_seed(kwargs["seed"])

def output_def(self):
8 changes: 5 additions & 3 deletions deepmd/pt/model/task/fitting.py
@@ -23,6 +23,8 @@
make_stat_input,
)

+ log = logging.getLogger(__name__)


class Fitting(torch.nn.Module, BaseFitting):
__plugins = Plugin()
@@ -115,7 +117,7 @@ def change_energy_bias(
ntest : int
The number of test samples in a system to change the energy bias.
"""
- logging.info(
+ log.info(
"Changing energy bias in pretrained model for types {}... "
"(this step may take long time)".format(str(new_type_map))
)
@@ -188,7 +190,7 @@ def change_energy_bias(
self.bias_atom_e[idx_type_map] += torch.from_numpy(
delta_bias.reshape(-1)
).to(DEVICE)
- logging.info(
+ log.info(
f"RMSE of atomic energy after linear regression is: {rmse_ae:10.5e} eV/atom."
)
elif bias_shift == "statistic":
@@ -202,7 +204,7 @@
)
else:
raise RuntimeError("Unknown bias_shift mode: " + bias_shift)
- logging.info(
+ log.info(
"Change energy bias of {} from {} to {}.".format(
str(new_type_map),
str(old_bias.detach().cpu().numpy()),
4 changes: 3 additions & 1 deletion deepmd/pt/optimizer/LKF.py
@@ -7,6 +7,8 @@
Optimizer,
)

+ log = logging.getLogger(__name__)


class LKFOptimizer(Optimizer):
def __init__(
@@ -59,7 +61,7 @@ def __init_P(self):

P = []
params_packed_index = []
logging.info("LKF parameter nums: %s" % param_nums)
log.info("LKF parameter nums: %s" % param_nums)
for param_num in param_nums:
if param_num >= block_size:
block_num = math.ceil(param_num / block_size)