Sync lazy_tensor_staging back to master (pytorch#72875)
Summary:
Pull Request resolved: pytorch#72875

This diff contains changes from several PRs landed on the lazy_tensor_staging branch. It:
* generates 'fallback' overrides for each codegenned op, which are useful for debugging
* supports operators whose op names are missing aten:: symbols, using their string counterparts instead
* makes the IR class a base class instead of hardcoding the TorchScript (TS) assumption

It also resolves lint issues and, in particular, cleans up the following:
* model {Type}s shouldn't be passed into isValueType; accepting the catch-all CType base class is nicer than spelling out a list of concrete types (a sketch follows below).
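
Illustrative sketch of that last point (stand-in classes only; not part of this commit and not the real tools.codegen hierarchy): once every C++-type wrapper derives from a single CType base class, isValueType can accept CType and recurse structurally instead of enumerating each concrete wrapper in a Union annotation.

from dataclasses import dataclass

@dataclass(frozen=True)
class CType:                      # stand-in for the catch-all base class
    pass

@dataclass(frozen=True)
class BaseCType(CType):           # stand-in leaf wrapper, e.g. 'Value' or 'int64_t'
    name: str

@dataclass(frozen=True)
class OptionalCType(CType):       # stand-in container wrapper
    elem: CType

def is_value_type(typ: CType) -> bool:
    # Accepting the base class covers every current and future subclass,
    # so adding a new container type never forces a signature change.
    if isinstance(typ, BaseCType):
        return typ.name == "Value"
    if isinstance(typ, OptionalCType):
        return is_value_type(typ.elem)
    return False

assert is_value_type(OptionalCType(BaseCType("Value")))
assert not is_value_type(BaseCType("int64_t"))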

Fixes pytorch#72852

Test Plan: tested manually on the lazy_tensor_staging branch

Reviewed By: shunting314

Differential Revision: D34250357

fbshipit-source-id: aa7d589f605055d5d02bc77c77fa6f1182ff7497
(cherry picked from commit 2f8f5e4)
wconstab authored and pytorchmergebot committed Feb 18, 2022
1 parent 056b626 commit 69389fb
Showing 16 changed files with 248 additions and 102 deletions.
2 changes: 1 addition & 1 deletion test/cpp/lazy/test_cache.cpp
@@ -11,7 +11,7 @@ namespace lazy {
 class CacheNode : public Node {
  public:
   explicit CacheNode(const std::string& str)
-      : Node(OpKind(), /* num_outputs */ 1, /* hash_seed */ Hash(str)),
+      : Node(OpKind(), /* num_outputs */ 1, /* hash_func */ [&](bool /*bakeInSizes*/) -> hash_t { return Hash(str); }),
         str_(str) {}
   ~CacheNode() override = default;
 
2 changes: 1 addition & 1 deletion test/cpp/lazy/test_ir.cpp
@@ -12,7 +12,7 @@ namespace lazy {
 class TestLeafNode : public Node {
  public:
   explicit TestLeafNode(size_t param)
-      : Node(OpKind(), /* num_outputs */ 1, /* hash_seed */ Hash(param)),
+      : Node(OpKind(), /* num_outputs */ 1, /* hash_func */ [&](bool /*bakeInSizes*/) -> hash_t { return Hash(param); }),
         param_(param) {}
   ~TestLeafNode() override = default;
 
2 changes: 1 addition & 1 deletion test/cpp/lazy/test_ir_util.cpp
@@ -12,7 +12,7 @@ namespace lazy {
 class IrUtilNode : public Node {
  public:
   explicit IrUtilNode()
-      : Node(OpKind(), /* num_outputs */ 1, /* hash_seed */ Hash(0)) {}
+      : Node(OpKind(), /* num_outputs */ 1, /* hash_func */ [&](bool /*bakeInSizes*/) -> hash_t { return Hash(0); }) {}
   ~IrUtilNode() override = default;
 
   void AddOperand(Value v) {
53 changes: 39 additions & 14 deletions tools/codegen/api/lazy.py
@@ -1,12 +1,11 @@
 from typing import List, Union, Tuple
 from tools.codegen.model import (Type, BaseTy, BaseType, OptionalType,
                                  ListType, OperatorName, FunctionSchema,
-                                 Return)
-from tools.codegen.api.types import (BaseCppType, BaseCType, OptionalCType,
-                                     ConstRefCType, NamedCType,
-                                     MutRefCType,
+                                 Return, TensorOptionsArguments)
+from tools.codegen.api.types import (CType, BaseCppType, BaseCType, OptionalCType,
+                                     NamedCType, deviceT, layoutT,
                                      VectorCType, boolT, longT, doubleT, ListCType, stringT,
-                                     scalarT, scalarTypeT, ArrayRefCType, ArrayCType, TupleCType)
+                                     scalarT, scalarTypeT)
 
 valueT = BaseCppType('torch::lazy', 'Value')
 
@@ -33,7 +32,9 @@ def process_ir_type(typ: Type) -> Union[BaseCType, VectorCType, OptionalCType, L
         if typ.name == BaseTy.Tensor:
             return BaseCType(valueT)
         elif typ.name == BaseTy.Scalar:
-            return BaseCType(scalarT)
+            # at::scalar has special handling,
+            # and is wrapped in an IR value just like at::tensor
+            return BaseCType(valueT)
         elif typ.name == BaseTy.ScalarType:
             return BaseCType(scalarTypeT)
         elif typ.name == BaseTy.int:
@@ -44,6 +45,10 @@ def process_ir_type(typ: Type) -> Union[BaseCType, VectorCType, OptionalCType, L
             return BaseCType(doubleT)
         elif typ.name == BaseTy.str:
             return BaseCType(stringT)
+        elif typ.name == BaseTy.Device:
+            return BaseCType(deviceT)
+        elif typ.name == BaseTy.Layout:
+            return BaseCType(layoutT)
         else:
             raise AssertionError(f"TODO add support for type {repr(typ)}")
     elif isinstance(typ, OptionalType):
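
The net effect of the two hunks above is that Scalar arguments now map to the same lazy Value type as Tensor arguments. A simplified, string-based sketch of that mapping (hypothetical stand-in for process_ir_type, not the real implementation, which operates on tools.codegen.model types):

def sketch_process_ir_type(type_name: str) -> str:
    # Tensor and Scalar are both wrapped in an IR Value; other base types keep
    # their usual C++ counterparts (approximate names shown here).
    if type_name in ("Tensor", "Scalar"):
        return "torch::lazy::Value"
    return {"int": "int64_t", "float": "double", "bool": "bool",
            "Device": "at::Device", "Layout": "at::Layout"}.get(type_name, type_name)

assert sketch_process_ir_type("Scalar") == sketch_process_ir_type("Tensor")
# Once both map to Value, the mapped type alone no longer says which arguments
# started out as Scalars -- that is the information isWrappedScalarType (added
# in the next hunk) preserves.
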
@@ -58,19 +63,36 @@ def process_ir_type(typ: Type) -> Union[BaseCType, VectorCType, OptionalCType, L
     raise AssertionError(f"unrecognized type {repr(typ)}")
 
 
-def isValueType(typ: Union[Type, BaseCType, OptionalCType, ConstRefCType, MutRefCType,
-                           ListCType, ArrayRefCType, ArrayCType, VectorCType, TupleCType]) -> bool:
+def isValueType(typ: CType) -> bool:
     """
     Given a type, determine if it is a Value-like type. This is equivalent to
     being Tensor-like, but assumes the type has already been transformed.
     """
     if isinstance(typ, BaseCType):
-        return typ.type == valueT
+        # I am regretting my naming conventions, but now we are wrapping at::scalar in
+        # lazy value, while preserving other 'scalar' types as scalars in the IR
+        return typ.type == valueT or typ.type == scalarT
     elif isinstance(typ, (OptionalCType, ListCType, VectorCType)):
         return isValueType(typ.elem)
     else:
         return False
 
+def isWrappedScalarType(typ: Type) -> bool:
+    """
+    Given a type, determine if it is a c10::scalar which we will wrap in a lazy Value.
+    Since we literally change the type from scalarT to valueT, information is lost.
+    This function helps build a list of wrapped scalars to save that information
+    """
+    if isinstance(typ, BaseType):
+        # I am regretting my naming conventions, but now we are wrapping at::scalar in
+        # lazy value, while preserving other 'scalar' types as scalars in the IR
+        return typ.name == BaseTy.Scalar
+    elif isinstance(typ, (OptionalType, ListType)):
+        return isWrappedScalarType(typ.elem)
+    else:
+        return False
+
+
 # Inspired by a FunctionSchema object, a LazyIrSchema holds the schema of a Lazy IR node.
 # Unlike a FunctionSchema, it has no round-trippable string form (relating to the YAML),
 # but carries type information from a native FunctionSchema modified for use with IR nodes,
@@ -87,6 +109,8 @@ class LazyIrSchema:
     # TODO: Need to handle collisions with argument names at some point
     returns: Tuple['Return', ...]
 
+    wrapped_scalar_names: List[str]
+
     def __init__(self, func: FunctionSchema):
 
         positional_arg_types = []
@@ -108,14 +132,15 @@ def __init__(self, func: FunctionSchema):
                           "tensor_options",
                           "post_tensor_options_kwarg_only",
                           "out"]:
-            if getattr(func.arguments, arg_field) is not None:
-                keyword_arg_types.extend([
-                    NamedCType(
-                        arg.name,
-                        process_ir_type(arg.type)) for arg in getattr(func.arguments, arg_field)])
+            curr_args = getattr(func.arguments, arg_field)
+            if curr_args is not None:
+                if isinstance(curr_args, TensorOptionsArguments):
+                    curr_args = curr_args.all()
+                keyword_arg_types.extend([NamedCType(arg.name, process_ir_type(arg.type)) for arg in curr_args])
         self.keyword_arg_types = tuple(keyword_arg_types)
         self.name = func.name
         self.returns = func.returns
+        self.wrapped_scalar_names = [arg.name for arg in func.schema_order_arguments() if isWrappedScalarType(arg.type)]
 
     @property
     def node_name(self) -> str:
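
To tie the LazyIrSchema changes together, here is a minimal sketch (stand-in dataclasses, not part of this commit and not the real tools.codegen classes) of the two behaviors added to __init__: flattening TensorOptionsArguments via .all() before keyword-argument processing, and recording wrapped_scalar_names for arguments that were Scalars before being rewritten to Value.

from dataclasses import dataclass
from typing import List, Union

@dataclass
class Argument:                    # stand-in for a schema argument
    name: str
    type: str                      # "Tensor", "Scalar", "int", ...

@dataclass
class TensorOptionsArguments:      # stand-in bundling dtype/layout/device/pin_memory
    dtype: Argument
    layout: Argument
    device: Argument
    pin_memory: Argument

    def all(self) -> List[Argument]:
        return [self.dtype, self.layout, self.device, self.pin_memory]

def is_wrapped_scalar_type(type_name: str) -> bool:
    return type_name == "Scalar"

def collect_keyword_args(fields: List[Union[List[Argument], TensorOptionsArguments, None]]) -> List[Argument]:
    out: List[Argument] = []
    for curr_args in fields:
        if curr_args is not None:
            # TensorOptionsArguments is not a plain argument list; flatten it first.
            if isinstance(curr_args, TensorOptionsArguments):
                curr_args = curr_args.all()
            out.extend(curr_args)
    return out

args = collect_keyword_args([
    [Argument("alpha", "Scalar")],
    TensorOptionsArguments(Argument("dtype", "ScalarType"), Argument("layout", "Layout"),
                           Argument("device", "Device"), Argument("pin_memory", "bool")),
    None,
])
# The real code scans all schema-order arguments; this sketch scans only the collected ones.
wrapped_scalar_names = [a.name for a in args if is_wrapped_scalar_type(a.type)]
assert [a.name for a in args] == ["alpha", "dtype", "layout", "device", "pin_memory"]
assert wrapped_scalar_names == ["alpha"]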