This repository has been archived by the owner on Nov 16, 2023. It is now read-only.

Improve inference performance with loaded TransformerChain ML.NET model #371

Open · wants to merge 4 commits into master
18 changes: 10 additions & 8 deletions src/python/nimbusml/pipeline.py
@@ -2073,13 +2073,9 @@ def _predict(self, X, y=None,
isinstance(X, DataFrame) and isinstance(y, (str, tuple))):
y = y_temp

is_transformer_chain = False
with ZipFile(self.model) as model_zip:
is_transformer_chain = any('TransformerChain' in item
for item in model_zip.namelist())

all_nodes = []
if is_transformer_chain:
if (hasattr(self, '_is_transformer_chain') and
self._is_transformer_chain):
inputs = dict([('data', ''), ('transform_model', self.model)])
Member:

How much is the perf gain? If it's not much, I would like to leave this as it is. The reason is that we should actually move to the new ML.NET format, and then this will be broken by your new fix.

Member Author:

A 6% gain, with .predict called on 100 rows of UCI Adult test data, in a 100-repetition for loop, repeated 5 times.

I think we should take the change. (1) If we leave it as it is, it will still be broken when NimbusML moves to the new ML.NET format, and how the graph is constructed will need to change anyway. (2) Moving to the new ML.NET format will likely take a long time, as it requires non-trivial changes to how model loading is handled in the entrypoints infrastructure on the ML.NET side, i.e. a new model implementation in addition to PredictorModel and TransformModel. (3) The changes directly below this ensure the PredictedLabel column is converted from bool to int32 only if it exists (i.e. the task is not regression or ranking) and only if its dtype is bool (i.e. it is binary classification).
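
For context, a minimal sketch (not part of the PR) of how a measurement like the one described above could be reproduced; the model and data file names, and the use of wall-clock timing, are assumptions:

import time

import numpy as np
from nimbusml import FileDataStream, Pipeline

# Assumed file names: a saved NimbusML/ML.NET model and 100 rows of UCI Adult test data.
model_path = 'adult_model.zip'
test_path = 'adult_test_100rows.csv'

pipeline = Pipeline()
pipeline.load_model(model_path)
data = FileDataStream.read_csv(test_path, sep=',', numeric_dtype=np.float32)

# Call .predict 100 times per run, repeat the run 5 times, keep the best time.
timings = []
for _ in range(5):
    start = time.time()
    for _ in range(100):
        pipeline.predict(data)
    timings.append(time.time() - start)

print('best of 5 runs: %.3fs for 100 predict calls' % min(timings))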

if isinstance(X, FileDataStream):
importtext_node = data_customtextloader(
@@ -2171,10 +2167,12 @@ def _predict(self, X, y=None,
self._run_time = time.time() - start_time
raise e

if is_transformer_chain:
if (hasattr(self, '_is_transformer_chain')
and self._is_transformer_chain
and hasattr(out_data, 'PredictedLabel')
and out_data.PredictedLabel.dtype == 'bool'):
out_data['PredictedLabel'] = out_data['PredictedLabel']*1


if y is not None:
# We need to fix the schema for ranking metrics
if evaltype == 'ranking':
@@ -2617,6 +2615,10 @@ def load_model(self, src):
self.model = src
self.steps = []

with ZipFile(self.model) as model_zip:
self._is_transformer_chain = any('TransformerChain' in item
for item in model_zip.namelist())

def __getstate__(self):
odict = {'export_version': 2}

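In plain form, the change to pipeline.py above moves the ZipFile inspection from every call to _predict into load_model, caching the result as _is_transformer_chain, and converts PredictedLabel from bool to integer labels only when that column exists and is boolean. A condensed sketch of the cached-flag pattern (the class and method here are simplified stand-ins, not the actual Pipeline code):

from zipfile import ZipFile


class CachedChainCheck:
    """Illustrative stand-in for the relevant parts of nimbusml.Pipeline."""

    def load_model(self, src):
        self.model = src
        self.steps = []
        # Inspect the model zip once at load time instead of on every predict call.
        with ZipFile(self.model) as model_zip:
            self._is_transformer_chain = any(
                'TransformerChain' in item for item in model_zip.namelist())

    def _fix_predicted_label(self, out_data):
        # Reuse the cached flag; hasattr guards pipelines that never loaded a model.
        if (hasattr(self, '_is_transformer_chain')
                and self._is_transformer_chain
                and hasattr(out_data, 'PredictedLabel')
                and out_data.PredictedLabel.dtype == 'bool'):
            # Binary classification only: map bool predictions to 0/1 integers.
            out_data['PredictedLabel'] = out_data['PredictedLabel'] * 1
        return out_data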
10 changes: 9 additions & 1 deletion src/python/nimbusml/tests/pipeline/test_load_save.py
@@ -11,7 +11,7 @@
import numpy as np
import pandas as pd

from nimbusml import Pipeline
from nimbusml import FileDataStream, Pipeline
from nimbusml.datasets import get_dataset
from nimbusml.feature_extraction.categorical import OneHotVectorizer
from nimbusml.linear_model import FastLinearBinaryClassifier, OnlineGradientDescentRegressor
@@ -33,6 +33,8 @@
(train, label) = get_X_y(train_file, label_column, sep=',')
(test, test_label) = get_X_y(test_file, label_column, sep=',')

mlnet_model = os.path.join(os.path.dirname(__file__), '..', 'utils', 'models', 'UciAdultMlNetModel.zip')

def get_temp_file(suffix=None):
fd, file_name = tempfile.mkstemp(suffix=suffix)
fl = os.fdopen(fd, 'w')
@@ -148,6 +150,12 @@ def test_model_datastream(self):

os.remove(model_filename)

def test_mlnet_model_can_be_scored(self):
data = FileDataStream.read_csv(test_file, sep=',', numeric_dtype=np.float32)
Member Author:

The ML.NET model is trained on a file that includes the label, and it expects the label to be present in the schema of the data being passed to predict (see the usage sketch after this diff).

model = Pipeline()
model.load_model(mlnet_model)
model.predict(data)

def test_pipeline_saves_complete_model_file_when_pickled(self):
model_nimbusml = Pipeline(
steps=[
Binary file not shown.
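
For reference, a hedged usage sketch of the scenario the new test exercises: scoring a standalone ML.NET model with an empty NimbusML Pipeline. The file names are placeholders; per the review comment above, the CSV must still contain the label column the model was trained with:

import numpy as np
from nimbusml import FileDataStream, Pipeline

# Placeholder paths: an ML.NET model zip and a CSV that still includes the label column.
mlnet_model_path = 'UciAdultMlNetModel.zip'
test_csv = 'uci_adult_test.csv'

# Load the data with the label present in the schema, as the ML.NET model expects.
data = FileDataStream.read_csv(test_csv, sep=',', numeric_dtype=np.float32)

# An empty Pipeline can host a pre-trained ML.NET model directly.
model = Pipeline()
model.load_model(mlnet_model_path)
predictions = model.predict(data)
print(predictions.head())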