Merged
3 changes: 0 additions & 3 deletions Jenkinsfile
@@ -98,7 +98,6 @@ stage('Build') {
     echo set\\(USE_GRAPH_RUNTIME ON\\) >> config.cmake
     echo set\\(USE_STACKVM_RUNTIME ON\\) >> config.cmake
     echo set\\(USE_GRAPH_RUNTIME_DEBUG ON\\) >> config.cmake
-    echo set\\(USE_ANTLR ON\\) >> config.cmake
     echo set\\(USE_BLAS openblas\\) >> config.cmake
     echo set\\(CMAKE_CXX_COMPILER g++\\) >> config.cmake
     echo set\\(CMAKE_CXX_FLAGS -Werror\\) >> config.cmake
@@ -134,7 +133,6 @@ stage('Build') {
     echo set\\(USE_LLVM llvm-config-4.0\\) >> config.cmake
     echo set\\(USE_NNPACK ON\\) >> config.cmake
     echo set\\(NNPACK_PATH /NNPACK/build/\\) >> config.cmake
-    echo set\\(USE_ANTLR ON\\) >> config.cmake
     echo set\\(CMAKE_CXX_COMPILER g++\\) >> config.cmake
     echo set\\(CMAKE_CXX_FLAGS -Werror\\) >> config.cmake
     """
@@ -143,7 +141,6 @@ stage('Build') {
   timeout(time: max_time, unit: 'MINUTES') {
     sh "${docker_run} tvmai/ci-cpu ./tests/scripts/task_cpp_unittest.sh"
     sh "${docker_run} tvmai/ci-cpu ./tests/scripts/task_python_vta.sh"
-    sh "${docker_run} tvmai/ci-cpu ./tests/scripts/task_rust.sh"
     sh "${docker_run} tvmai/ci-cpu ./tests/scripts/task_golang.sh"
     sh "${docker_run} tvmai/ci-cpu ./tests/scripts/task_python_unittest.sh"
     sh "${docker_run} tvmai/ci-cpu ./tests/scripts/task_python_integration.sh"
2 changes: 1 addition & 1 deletion cmake/modules/ANTLR.cmake
@@ -3,7 +3,7 @@ if(USE_ANTLR)
           /usr/local/lib/antlr-*-complete.jar
           /usr/local/Cellar/*antlr-*-complete.jar)
 
-  if(DEFINED ANTLR4)
+  if(ANTLR4)
     # Get the first element of the list of antlr jars.
     # Sort and reverse the list so the item selected is the highest
     # version in lib or else in Cellar if no lib installation exists.
15 changes: 7 additions & 8 deletions nnvm/python/nnvm/top/nn.py
@@ -61,10 +61,9 @@ def schedule_log_softmax(_, outs, target):
 @reg.register_compute("dense")
 def compute_dense(attrs, inputs, _):
     """Compute definition of dense"""
-    with tvm.target.create(attrs.get_string("target")):
-        if attrs.get_bool("use_bias"):
-            return topi.nn.dense(inputs[0], inputs[1], bias=inputs[2])
-        return topi.nn.dense(inputs[0], inputs[1])
+    if attrs.get_bool("use_bias"):
+        return topi.nn.dense(inputs[0], inputs[1], bias=inputs[2])
+    return topi.nn.dense(inputs[0], inputs[1])
 
 @reg.register_schedule("dense")
 def schedule_dense(_, outs, target):
@@ -313,22 +312,22 @@ def schedule_conv2d_transpose(attrs, outs, target):
 
 @reg.register_alter_op_layout("max_pool2d")
 def alter_pooling_layout_max_pool2d(attrs, inputs, tinfos):
-    with tvm.target.create(attrs.get_string("target")):
+    with tvm.target.create(attrs["target"]):
         return topi.nn.max_pool2d_alter_layout(attrs, inputs, tinfos)
 
 @reg.register_alter_op_layout("avg_pool2d")
 def alter_pooling_layout_avg_pool2d(attrs, inputs, tinfos):
-    with tvm.target.create(attrs.get_string("target")):
+    with tvm.target.create(attrs["target"]):
         return topi.nn.avg_pool2d_alter_layout(attrs, inputs, tinfos)
 
 @reg.register_alter_op_layout("global_max_pool2d")
 def alter_pooling_layout_global_max_pool2d(attrs, inputs, tinfos):
-    with tvm.target.create(attrs.get_string("target")):
+    with tvm.target.create(attrs["target"]):
         return topi.nn.global_max_pool2d_alter_layout(attrs, inputs, tinfos)
 
 @reg.register_alter_op_layout("global_avg_pool2d")
 def alter_pooling_layout_global_avg_pool2d(attrs, inputs, tinfos):
-    with tvm.target.create(attrs.get_string("target")):
+    with tvm.target.create(attrs["target"]):
         return topi.nn.global_avg_pool2d_alter_layout(attrs, inputs, tinfos)
 
 # max_pool2d
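Note: the alter-layout hooks above still open a target scope, but now read the injected target attribute with dictionary-style access (`attrs["target"]`) instead of `attrs.get_string("target")`, while `compute_dense` stops creating a target scope altogether. For readers unfamiliar with the API, a minimal sketch of how `tvm.target.create` is used as a context manager in this generation of TVM (the `"cuda"` target string is only an illustration):

```python
# Minimal sketch, assuming a contemporary TVM 0.x install where
# tvm.target.create and tvm.target.current_target are available.
import tvm

with tvm.target.create("cuda"):
    # Code inside the scope (e.g. topi schedules) can query the active target.
    print(tvm.target.current_target())
```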
2 changes: 2 additions & 0 deletions nnvm/src/pass/subgraph/tensorrt_subgraph_property.cc
@@ -173,6 +173,8 @@ class TensorRTSubgraphProperty: public SubgraphProperty {
         this->GetAttr<std::unordered_set<std::string>>("op_names"));
   }
 
+  virtual ~TensorRTSubgraphProperty() {}
+
 private:
   nnvm::Symbol RemoveFlattenOpNodes(nnvm::Symbol sym) const {
     std::stack<nnvm::Node*> node_stack;
6 changes: 4 additions & 2 deletions nnvm/src/top/nn/nn.cc
@@ -194,11 +194,12 @@ inline bool BatchNormInferShape(const nnvm::NodeAttrs& attrs,
   return true;
 }
 
-inline bool BatchNormCorrectLayout(const NodeAttrs& attrs,
+inline bool BatchNormCorrectLayout(const NodeAttrs& attrs_const,
                                    std::vector<TShape>* ishapes,
                                    std::vector<Layout> *in_layouts,
                                    const std::vector<Layout> *last_in_layouts,
                                    std::vector<Layout> *out_layouts) {
+  NodeAttrs& attrs = const_cast<NodeAttrs&>(attrs_const);
   const BatchNormParam& param = nnvm::get<BatchNormParam>(attrs.parsed);
   CHECK_EQ(in_layouts->size(), 5U);
   CHECK_EQ(last_in_layouts->size(), 5U);
@@ -593,11 +594,12 @@ inline bool PadInferShape(const nnvm::NodeAttrs& attrs,
   return true;
 }
 
-inline bool PadCorrectLayout(const NodeAttrs& attrs,
+inline bool PadCorrectLayout(const NodeAttrs& attrs_const,
                              std::vector<TShape>* ishapes,
                              std::vector<Layout>* ilayouts,
                              const std::vector<Layout>* last_ilayouts,
                              std::vector<Layout>* olayouts) {
+  NodeAttrs& attrs = const_cast<NodeAttrs&>(attrs_const);
   const PadParam& param = nnvm::get<PadParam>(attrs.parsed);
   const auto& last_layout = last_ilayouts->at(0);
   Layout layout = ilayouts->at(0);
56 changes: 0 additions & 56 deletions topi/python/topi/cuda/pooling.py
@@ -131,59 +131,3 @@ def traverse(OP):
 
     traverse(outs[0].op)
     return s
-
-
-@avg_pool2d_alter_layout.register(["cuda"])
-def _alter_avg_pool2d_layout(attrs, inputs, tinfo):
-    import nnvm.symbol as sym
-    copy_inputs = [s for s in inputs]
-    new_attrs = {k : attrs[k] for k in attrs.keys()}
-    # NHWC -> NCHW
-    if attrs["layout"] != "NHWC":
-        return None
-    new_attrs["layout"] = "NCHW"
-    if "target" in new_attrs:
-        del new_attrs["target"]
-    return sym.avg_pool2d(*copy_inputs, **new_attrs)
-
-
-@max_pool2d_alter_layout.register(["cuda"])
-def _alter_max_pool2d_layout(attrs, inputs, tinfo):
-    import nnvm.symbol as sym
-    copy_inputs = [s for s in inputs]
-    new_attrs = {k : attrs[k] for k in attrs.keys()}
-    # NHWC -> NCHW
-    if attrs["layout"] != "NHWC":
-        return None
-    new_attrs["layout"] = "NCHW"
-    if "target" in new_attrs:
-        del new_attrs["target"]
-    return sym.max_pool2d(*copy_inputs, **new_attrs)
-
-
-@global_max_pool2d_alter_layout.register(["cuda"])
-def _alter_global_max_pool2d_layout(attrs, inputs, tinfo):
-    import nnvm.symbol as sym
-    copy_inputs = [s for s in inputs]
-    new_attrs = {k : attrs[k] for k in attrs.keys()}
-    # NHWC -> NCHW
-    if attrs["layout"] != "NHWC":
-        return None
-    new_attrs["layout"] = "NCHW"
-    if "target" in new_attrs:
-        del new_attrs["target"]
-    return sym.global_max_pool2d(*copy_inputs, **new_attrs)
-
-
-@global_avg_pool2d_alter_layout.register(["cuda"])
-def _alter_global_avg_pool2d_layout(attrs, inputs, tinfo):
-    import nnvm.symbol as sym
-    copy_inputs = [s for s in inputs]
-    new_attrs = {k : attrs[k] for k in attrs.keys()}
-    # NHWC -> NCHW
-    if attrs["layout"] != "NHWC":
-        return None
-    new_attrs["layout"] = "NCHW"
-    if "target" in new_attrs:
-        del new_attrs["target"]
-    return sym.global_avg_pool2d(*copy_inputs, **new_attrs)
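All four removed hooks share one shape: decline anything that is not NHWC by returning None, copy the attributes, rewrite the layout to NCHW, strip the injected `target` key, and re-emit the symbol. A condensed sketch of that shared pattern (the `_alter_pool_layout` helper and its `op` parameter are hypothetical, for illustration only):

```python
import nnvm.symbol as sym

def _alter_pool_layout(op, attrs, inputs):
    """Hypothetical helper condensing the four removed hooks above."""
    if attrs["layout"] != "NHWC":
        return None  # returning None leaves the original operator untouched
    new_attrs = {k: attrs[k] for k in attrs.keys()}
    new_attrs["layout"] = "NCHW"         # rewrite NHWC -> NCHW
    new_attrs.pop("target", None)        # drop the injected target attribute
    return op(*inputs, **new_attrs)

# Usage, e.g.: _alter_pool_layout(sym.max_pool2d, attrs, inputs)
```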