Skip to content

Fix how we set logging levels #3703

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Mar 30, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .pylintrc
Original file line number Diff line number Diff line change
Expand Up @@ -44,3 +44,5 @@ disable =
# Appears to be https://github.com/PyCQA/pylint/issues/2981
W0201,

# Using the global statement
W0603,
7 changes: 3 additions & 4 deletions gym-unity/gym_unity/envs/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
import logging
import itertools
import numpy as np
from typing import Any, Dict, List, Optional, Tuple, Union
Expand All @@ -8,6 +7,7 @@

from mlagents_envs.environment import UnityEnvironment
from mlagents_envs.base_env import BatchedStepResult
from mlagents_envs import logging_util


class UnityGymException(error.Error):
Expand All @@ -18,9 +18,8 @@ class UnityGymException(error.Error):
pass


logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("gym_unity")

logger = logging_util.get_logger(__name__)
logging_util.set_log_level(logging_util.INFO)

GymSingleStepResult = Tuple[np.ndarray, float, bool, Dict]
GymMultiStepResult = Tuple[List[np.ndarray], List[float], List[bool], Dict]
Expand Down
5 changes: 3 additions & 2 deletions ml-agents-envs/mlagents_envs/environment.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,14 @@
import atexit
import glob
import uuid
import logging
import numpy as np
import os
import subprocess
from typing import Dict, List, Optional, Any

import mlagents_envs

from mlagents_envs.logging_util import get_logger
from mlagents_envs.side_channel.side_channel import SideChannel, IncomingMessage

from mlagents_envs.base_env import (
Expand Down Expand Up @@ -47,7 +48,7 @@
import struct


logger = logging.getLogger("mlagents_envs")
logger = get_logger(__name__)


class UnityEnvironment(BaseEnv):
Expand Down
46 changes: 46 additions & 0 deletions ml-agents-envs/mlagents_envs/logging_util.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
import logging # noqa I251
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is moved from ml-agents/mlagents/logging_util.py and heavily modified.


CRITICAL = logging.CRITICAL
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Imported here so that calling code doesn't need to import logging

FATAL = logging.FATAL
ERROR = logging.ERROR
WARNING = logging.WARNING
INFO = logging.INFO
DEBUG = logging.DEBUG
NOTSET = logging.NOTSET

_loggers = set()
_log_level = NOTSET
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
LOG_FORMAT = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"


def get_logger(name: str) -> logging.Logger:
"""
Create a logger with the specified name. The logger will use the log level
specified by set_log_level()
"""
logger = logging.getLogger(name=name)

# If we've already set the log level, make sure new loggers use it
if _log_level != NOTSET:
logger.setLevel(_log_level)

# Keep track of this logger so that we can change the log level later
_loggers.add(logger)
return logger


def set_log_level(log_level: int) -> None:
    """
    Set the ML-Agents logging level. This will also configure the logging format
    (if it hasn't already been set).

    :param log_level: One of the module-level level constants
        (DEBUG, INFO, WARNING, ERROR, ...).
    """
    global _log_level
    _log_level = log_level

    # Configure the log format.
    # In theory, this would be sufficient, but if another library calls
    # logging.basicConfig first, it doesn't have any effect.
    logging.basicConfig(level=_log_level, format=LOG_FORMAT, datefmt=DATE_FORMAT)

    # NOTE(review): if __name__ were used everywhere, the logger-name hierarchy
    # could handle this by setting the level on just the "mlagents",
    # "mlagents_envs" and "gym_unity" root loggers — e.g.
    #   logging.getLogger("mlagents").setLevel(log_level)
    # — but the explicit per-logger approach was chosen instead.
    for logger in _loggers:
        logger.setLevel(log_level)
4 changes: 2 additions & 2 deletions ml-agents-envs/mlagents_envs/side_channel/outgoing_message.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
from typing import List
import struct

import logging
from mlagents_envs.logging_util import get_logger

logger = logging.getLogger(__name__)
logger = get_logger(__name__)


class OutgoingMessage:
Expand Down
4 changes: 2 additions & 2 deletions ml-agents-envs/mlagents_envs/side_channel/side_channel.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
from abc import ABC, abstractmethod
from typing import List
import uuid
import logging

from mlagents_envs.side_channel import IncomingMessage, OutgoingMessage
from mlagents_envs.logging_util import get_logger

logger = logging.getLogger(__name__)
logger = get_logger(__name__)


class SideChannel(ABC):
Expand Down
10 changes: 0 additions & 10 deletions ml-agents/mlagents/logging_util.py

This file was deleted.

5 changes: 3 additions & 2 deletions ml-agents/mlagents/model_serialization.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
from distutils.util import strtobool
import os
import logging
from typing import Any, List, Set, NamedTuple
from distutils.version import LooseVersion

Expand All @@ -19,14 +18,16 @@

from tensorflow.python.platform import gfile
from tensorflow.python.framework import graph_util

from mlagents_envs.logging_util import get_logger
from mlagents.trainers import tensorflow_to_barracuda as tf2bc

if LooseVersion(tf.__version__) < LooseVersion("1.12.0"):
# ONNX is only tested on 1.12.0 and later
ONNX_EXPORT_ENABLED = False

logger = get_logger(__name__)

logger = logging.getLogger("mlagents.trainers")

POSSIBLE_INPUT_NODES = frozenset(
[
Expand Down
Original file line number Diff line number Diff line change
@@ -1,16 +1,17 @@
import logging
from typing import Any, Dict, List
from collections import namedtuple
import numpy as np
import abc

from mlagents.tf_utils import tf

from mlagents_envs.logging_util import get_logger
from mlagents.trainers.exception import UnityTrainerException
from mlagents.trainers.policy.tf_policy import TFPolicy
from mlagents.trainers.buffer import AgentBuffer

logger = logging.getLogger("mlagents.trainers")

logger = get_logger(__name__)

RewardSignalResult = namedtuple(
"RewardSignalResult", ["scaled_reward", "unscaled_reward"]
Expand Down
4 changes: 2 additions & 2 deletions ml-agents/mlagents/trainers/curriculum.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,9 @@

from .exception import CurriculumConfigError, CurriculumLoadingError

import logging
from mlagents_envs.logging_util import get_logger

logger = logging.getLogger("mlagents.trainers")
logger = get_logger(__name__)


class Curriculum:
Expand Down
5 changes: 3 additions & 2 deletions ml-agents/mlagents/trainers/env_manager.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,18 @@
from abc import ABC, abstractmethod
import logging
from typing import List, Dict, NamedTuple, Iterable, Tuple
from mlagents_envs.base_env import BatchedStepResult, AgentGroupSpec, AgentGroup
from mlagents_envs.side_channel.stats_side_channel import StatsAggregationMethod
from mlagents.trainers.brain import BrainParameters
from mlagents.trainers.policy.tf_policy import TFPolicy
from mlagents.trainers.agent_processor import AgentManager, AgentManagerQueue
from mlagents.trainers.action_info import ActionInfo
from mlagents_envs.logging_util import get_logger

AllStepResult = Dict[AgentGroup, BatchedStepResult]
AllGroupSpec = Dict[AgentGroup, AgentGroupSpec]

logger = logging.getLogger("mlagents.trainers")

logger = get_logger(__name__)


class EnvironmentStep(NamedTuple):
Expand Down
5 changes: 3 additions & 2 deletions ml-agents/mlagents/trainers/ghost/trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,8 @@
from typing import Deque, Dict, List, Any, cast

import numpy as np
import logging

from mlagents_envs.logging_util import get_logger
from mlagents.trainers.brain import BrainParameters
from mlagents.trainers.policy import Policy
from mlagents.trainers.policy.tf_policy import TFPolicy
Expand All @@ -16,7 +16,8 @@
from mlagents.trainers.stats import StatsPropertyType
from mlagents.trainers.behavior_id_utils import BehaviorIdentifiers

logger = logging.getLogger("mlagents.trainers")

logger = get_logger(__name__)


class GhostTrainer(Trainer):
Expand Down
17 changes: 9 additions & 8 deletions ml-agents/mlagents/trainers/learn.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
# # Unity ML-Agents Toolkit
import logging
import argparse

import os
Expand Down Expand Up @@ -30,7 +29,9 @@
from mlagents_envs.side_channel.engine_configuration_channel import EngineConfig
from mlagents_envs.exception import UnityEnvironmentException
from mlagents_envs.timers import hierarchical_timer, get_timer_tree
from mlagents.logging_util import create_logger
from mlagents_envs import logging_util
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do we need to import everything ?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I received the opposite feedback from @ervteng on Friday, see #3703 (comment)

I think this is preferable when we want to use the logging levels; it's clearer in the code to see logging_util.INFO instead of INFO.

Another alternative is to use import logging and disable the "banned module" error here.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
from mlagents_envs import logging_util
from mlagents_envs.logging_util import get_logger, set_log_level, INFO, DEBUG


logger = logging_util.get_logger(__name__)


def _create_parser():
Expand Down Expand Up @@ -315,7 +316,7 @@ def write_timing_tree(summaries_dir: str, run_id: str) -> None:
with open(timing_path, "w") as f:
json.dump(get_timer_tree(), f, indent=4)
except FileNotFoundError:
logging.warning(
logger.warning(
f"Unable to save to {timing_path}. Make sure the directory exists"
)

Expand Down Expand Up @@ -412,16 +413,16 @@ def run_cli(options: RunOptions) -> None:
print(get_version_string())

if options.debug:
log_level = logging.DEBUG
log_level = logging_util.DEBUG
else:
log_level = logging.INFO
log_level = logging_util.INFO
# disable noisy warnings from tensorflow
tf_utils.set_warnings_enabled(False)

trainer_logger = create_logger("mlagents.trainers", log_level)
logging_util.set_log_level(log_level)

trainer_logger.debug("Configuration for this run:")
trainer_logger.debug(json.dumps(options._asdict(), indent=4))
logger.debug("Configuration for this run:")
logger.debug(json.dumps(options._asdict(), indent=4))

run_seed = options.seed
if options.cpu:
Expand Down
4 changes: 2 additions & 2 deletions ml-agents/mlagents/trainers/meta_curriculum.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,9 @@
from typing import Dict, Set
from mlagents.trainers.curriculum import Curriculum

import logging
from mlagents_envs.logging_util import get_logger

logger = logging.getLogger("mlagents.trainers")
logger = get_logger(__name__)


class MetaCurriculum:
Expand Down
4 changes: 2 additions & 2 deletions ml-agents/mlagents/trainers/policy/tf_policy.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
import logging
from typing import Any, Dict, List, Optional
import abc
import numpy as np
from mlagents.tf_utils import tf
from mlagents import tf_utils
from mlagents_envs.exception import UnityException
from mlagents_envs.logging_util import get_logger
from mlagents.trainers.policy import Policy
from mlagents.trainers.action_info import ActionInfo
from mlagents.trainers.trajectory import SplitObservations
Expand All @@ -13,7 +13,7 @@
from mlagents.trainers.models import ModelUtils


logger = logging.getLogger("mlagents.trainers")
logger = get_logger(__name__)


class UnityPolicyException(UnityException):
Expand Down
4 changes: 2 additions & 2 deletions ml-agents/mlagents/trainers/ppo/trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,11 @@
# ## ML-Agent Learning (PPO)
# Contains an implementation of PPO as described in: https://arxiv.org/abs/1707.06347

import logging
from collections import defaultdict

import numpy as np

from mlagents_envs.logging_util import get_logger
from mlagents.trainers.policy.nn_policy import NNPolicy
from mlagents.trainers.trainer.rl_trainer import RLTrainer
from mlagents.trainers.brain import BrainParameters
Expand All @@ -16,7 +16,7 @@
from mlagents.trainers.exception import UnityTrainerException


logger = logging.getLogger("mlagents.trainers")
logger = get_logger(__name__)


class PPOTrainer(RLTrainer):
Expand Down
4 changes: 2 additions & 2 deletions ml-agents/mlagents/trainers/sac/optimizer.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
import logging
import numpy as np
from typing import Dict, List, Optional, Any, Mapping

from mlagents.tf_utils import tf

from mlagents_envs.logging_util import get_logger
from mlagents.trainers.sac.network import SACPolicyNetwork, SACTargetNetwork
from mlagents.trainers.models import LearningRateSchedule, EncoderType, ModelUtils
from mlagents.trainers.optimizer.tf_optimizer import TFOptimizer
Expand All @@ -13,7 +13,7 @@

EPSILON = 1e-6 # Small value to avoid divide by zero

logger = logging.getLogger("mlagents.trainers")
logger = get_logger(__name__)

POLICY_SCOPE = ""
TARGET_SCOPE = "target_network"
Expand Down
5 changes: 3 additions & 2 deletions ml-agents/mlagents/trainers/sac/trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,14 @@
# Contains an implementation of SAC as described in https://arxiv.org/abs/1801.01290
# and implemented in https://github.com/hill-a/stable-baselines

import logging
from collections import defaultdict
from typing import Dict
import os

import numpy as np


from mlagents_envs.logging_util import get_logger
from mlagents_envs.timers import timed
from mlagents.trainers.policy.tf_policy import TFPolicy
from mlagents.trainers.policy.nn_policy import NNPolicy
Expand All @@ -20,7 +20,8 @@
from mlagents.trainers.exception import UnityTrainerException


logger = logging.getLogger("mlagents.trainers")
logger = get_logger(__name__)

BUFFER_TRUNCATE_PERCENT = 0.8


Expand Down
Loading