Skip to content

Commit

Permalink
lint
Browse files Browse the repository at this point in the history
  • Loading branch information
sakoush committed Oct 11, 2021
1 parent 524b812 commit ae6aff1
Show file tree
Hide file tree
Showing 8 changed files with 36 additions and 22 deletions.
12 changes: 8 additions & 4 deletions runtimes/alibi-explain/mlserver_alibi_explain/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,15 +21,18 @@

_TAG_TO_RT_IMPL = {
_ANCHOR_IMAGE_TAG: (
"mlserver_alibi_explain.explainers.black_box_runtime.AlibiExplainBlackBoxRuntime",
"mlserver_alibi_explain.explainers.black_box_runtime."
"AlibiExplainBlackBoxRuntime",
"alibi.explainers.AnchorImage",
),
_ANCHOR_TEXT_TAG: (
"mlserver_alibi_explain.explainers.black_box_runtime.AlibiExplainBlackBoxRuntime",
"mlserver_alibi_explain.explainers.black_box_runtime."
"AlibiExplainBlackBoxRuntime",
"alibi.explainers.AnchorText",
),
_INTEGRATED_GRADIENTS_TAG: (
"mlserver_alibi_explain.explainers.integrated_gradients.IntegratedGradientsWrapper",
"mlserver_alibi_explain.explainers.integrated_gradients."
"IntegratedGradientsWrapper",
"alibi.explainers.IntegratedGradients",
),
}
Expand Down Expand Up @@ -70,7 +73,8 @@ def remote_predict(
return InferenceResponse.parse_raw(response_raw.text)


# TODO: this is very similar to `asyncio.to_thread` (python 3.9+), so lets use it at some point.
# TODO: this is very similar to `asyncio.to_thread` (python 3.9+),
# so let's use it at some point.
def execute_async(
loop: Optional[AbstractEventLoop], fn: Callable, *args, **kwargs
) -> Awaitable:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,8 @@

class AlibiExplainBlackBoxRuntime(AlibiExplainRuntimeBase):
"""
Runtime for black box explainer runtime, i.e. explainer that would just need access to infer feature from the
underlying model (no gradients etc.)
Runtime for black box explainers, i.e. explainers that just need access
to infer features from the underlying model (no gradients etc.)
"""

def __init__(self, settings: ModelSettings, explainer_class: Type[Explainer]):
Expand Down Expand Up @@ -48,8 +48,8 @@ def _infer_impl(self, input_data: np.ndarray) -> np.ndarray:

v2_request = InferenceRequest(
parameters=Parameters(content_type=NumpyCodec.ContentType),
# TODO: we probably need to tell alibi about the expected types to use or even whether it is a
# proba or targets etc
# TODO: we probably need to tell alibi about the expected types to use
# or even whether it is a proba or targets etc
inputs=[np_codec.encode_request_input(name="predict", payload=input_data)],
)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@

from alibi.api.interfaces import Explanation, Explainer
from alibi.saving import load_explainer
from pydantic import BaseSettings

from mlserver import ModelSettings
from mlserver_alibi_explain.common import AlibiExplainSettings
Expand All @@ -11,7 +10,8 @@

class AlibiExplainWhiteBoxRuntime(AlibiExplainRuntimeBase):
"""
White box alibi explain requires access to the full inference model to compute gradients etc. usually in the same
White box alibi explain requires access to the full inference model
to compute gradients etc. usually in the same
domain as the explainer itself. e.g. `IntegratedGradients`
"""

Expand Down
8 changes: 6 additions & 2 deletions runtimes/alibi-explain/mlserver_alibi_explain/runtime.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,10 @@ def __init__(
super().__init__(settings)

async def predict(self, payload: InferenceRequest) -> InferenceResponse:
"""This is actually a call to explain as we are treating an explainer model as MLModel"""
"""
This is actually a call to explain as we are treating
an explainer model as MLModel
"""

# TODO: convert and validate?
model_input = payload.inputs[0]
Expand Down Expand Up @@ -79,7 +82,8 @@ class AlibiExplainRuntime(MLModel):

def __init__(self, settings: ModelSettings):
# TODO: we probably want to validate the enum more sanely here
# we do not want to construct a specific alibi settings here because it might be dependent on type
# we do not want to construct a specific alibi settings here because
# it might be dependent on type
# although at the moment we only have one `AlibiExplainSettings`
explainer_type = settings.parameters.extra[EXPLAINER_TYPE_TAG]

Expand Down
14 changes: 9 additions & 5 deletions runtimes/alibi-explain/tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,8 @@
from .test_model import TFMNISTModel

# allow nesting loop
# in our case this allows multiple runtimes to execute in the same thread for testing reasons
# in our case this allows multiple runtimes to execute
# in the same thread for testing reasons
nest_asyncio.apply()

TESTS_PATH = os.path.dirname(__file__)
Expand All @@ -40,7 +41,8 @@ def pytest_collection_modifyitems(items):
item.add_marker("asyncio")


# TODO: there is a lot of common code here with testing rest calls, refactor perhaps to make it neater
# TODO: there is a lot of common code here with testing rest calls,
# refactor perhaps to make it neater
@pytest.fixture
def pytorch_model_uri() -> str:
pytorch_model_path = os.path.join(TESTDATA_PATH, "pytorch_model")
Expand Down Expand Up @@ -159,9 +161,11 @@ async def anchor_image_runtime_with_remote_predict_patch(
with patch(remote_predict_mock_path) as remote_predict:

def mock_predict(*args, **kwargs):
# note: sometimes the event loop is not running and in this case we create a new one otherwise
# note: sometimes the event loop is not running and in this case
# we create a new one; otherwise
# we use the existing one.
# this mock implementation is required as we dont want to spin up a server, we just use MLModel.predict
# this mock implementation is required as we don't want to spin up a server,
# we just use MLModel.predict
try:
loop = asyncio.get_event_loop()
res = loop.run_until_complete(
Expand All @@ -184,7 +188,7 @@ def mock_predict(*args, **kwargs):
parameters=ModelParameters(
uri=f"{TESTS_PATH}/data/mnist_anchor_image",
extra=AlibiExplainSettings(
explainer_type="anchor_image", infer_uri=f"dummy_call"
explainer_type="anchor_image", infer_uri="dummy_call"
),
),
)
Expand Down
7 changes: 4 additions & 3 deletions runtimes/alibi-explain/tests/test_black_box.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,8 @@ async def test_predict_impl(
):
# note: custom_runtime_tf is the underlying inference runtime
# we want to test that the underlying impl predict is functionally correct
# anchor_image_runtime fixture if already mocking `remote_predict` -> custom_runtime_tf.predict
# anchor_image_runtime fixture is already mocking
# `remote_predict` -> custom_runtime_tf.predict

# [batch, image_x, image_y, channel]
data = np.random.randn(10, 28, 28, 1) * 255
Expand Down Expand Up @@ -77,8 +78,8 @@ async def test_end_2_end(
alibi_anchor_image_model,
payload: InferenceRequest,
):
# in this test we are getting explanation and making sure that it the same one as returned by alibi
# directly
# in this test we are getting explanation and making sure that it is the same
# one as returned by alibi directly
runtime_result = await anchor_image_runtime_with_remote_predict_patch.predict(
payload
)
Expand Down
3 changes: 2 additions & 1 deletion runtimes/alibi-explain/tests/test_integrated_gradients.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,8 @@ async def test_end_2_end(
alibi_integrated_gradients_model,
payload: InferenceRequest,
):
# in this test we are getting explanation and making sure that it the same one as returned by alibi
# in this test we are getting explanation and making sure that it is the same
# one as returned by alibi
# directly
runtime_result = await integrated_gradients_runtime.predict(payload)
decoded_runtime_results = json.loads(
Expand Down
2 changes: 1 addition & 1 deletion runtimes/alibi-explain/tests/test_runtime.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ def test_remote_predict__smoke(runtime_pytorch, rest_client):

async def test_alibi_runtime_wrapper(custom_runtime_tf: MLModel):
"""
Checks that the wrappers returns back the expected valued from the underlying runtime
Checks that the wrapper returns the expected value from the underlying runtime
"""

class _MockInit(AlibiExplainRuntime):
Expand Down

0 comments on commit ae6aff1

Please sign in to comment.