Misc fixes for MacOS. (llvm#255)
* Change aligned_alloc -> malloc. It can fail (and does on MacOS) and is a bit over-aggressive optimization for a reference backend.
* Fixed a fragile test that prints -0.0 on MacOS.
* Fail the test (not the framework) on failure to trace (Torch on MacOS is missing features).
* Fix .so -> .dylib for compiler runtime.
stellaraccident authored Jul 28, 2021
1 parent 2dbab50 commit ec611c1
Showing 4 changed files with 41 additions and 35 deletions.
@@ -227,9 +227,9 @@ def run_tests(tests: List[Test], config: TestConfig) -> List[TestResult]:
"""Invoke the given `Test`'s with the provided `TestConfig`."""
results = []
for test in tests:
golden_trace = _generate_golden_trace(test)
# TODO: Precompile everything in parallel.
try:
golden_trace = _generate_golden_trace(test)
compiled = config.compile(test.program_factory())
except Exception as e:
results.append(
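Moving `_generate_golden_trace` inside the `try` means a tracing failure (for example a Torch feature missing on macOS) is reported as a failed test instead of an unhandled exception that aborts the whole harness. Below is a minimal, self-contained sketch of that pattern; the result type and its fields are stand-ins for illustration, not the framework's actual `TestResult` signature.

```python
import traceback
from dataclasses import dataclass
from typing import Callable, List, Optional


@dataclass
class ToyTestResult:
  # Stand-in for the framework's TestResult; the real fields may differ.
  unique_name: str
  error: Optional[str] = None


def run_all(test_names: List[str],
            trace_fn: Callable[[str], object]) -> List[ToyTestResult]:
  """Every test runs; a per-test exception is captured, never re-raised."""
  results = []
  for name in test_names:
    try:
      # Anything that can raise (tracing, compilation) stays inside the try,
      # so one unsupported test cannot take down the whole run.
      trace_fn(name)
      results.append(ToyTestResult(unique_name=name))
    except Exception:
      results.append(ToyTestResult(unique_name=name,
                                   error=traceback.format_exc()))
  return results


if __name__ == "__main__":
  def fake_trace(name):
    if name == "needs_missing_torch_feature":
      raise NotImplementedError("not supported on this platform")

  for r in run_all(["basic", "needs_missing_torch_feature"], fake_trace):
    print(r.unique_name, "FAIL" if r.error else "PASS")
```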
lib/RefBackend/Runtime/Runtime.cpp: 3 changes (2 additions & 1 deletion)
@@ -488,7 +488,8 @@ RtValue refbackrt::createRtValueFromOutputArgInfo(const OutputArgInfo &info) {
  switch (info.elementType) {
  case ElementType::F32: {
    auto byteSize = numel * sizeof(float);
-    data = static_cast<void *>(aligned_alloc(32, byteSize));
+    data = static_cast<void *>(malloc(byteSize));
+    assert(data && "could not allocate tensor");
    memset(data, 0, byteSize);
    return RtValue(Tensor::create(shape, ElementType::F32, data));
    break;
python/npcomp/compiler/generic/backend/refjit.py: 7 changes (6 additions & 1 deletion)
@@ -3,6 +3,7 @@
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

import os
+import platform

_refjit = None

@@ -40,7 +41,11 @@ def is_enabled() -> bool:
def get_runtime_libs():
  # The _refjit_resources directory is at the npcomp.compiler level.
  resources_dir = os.path.join(os.path.dirname(__file__))
-  return [os.path.join(resources_dir, "libNPCOMPCompilerRuntimeShlib.so")]
+  suffix = ".so"
+  if platform.system() == "Darwin":
+    suffix = ".dylib"
+  shlib_name = f"libNPCOMPCompilerRuntimeShlib{suffix}"
+  return [os.path.join(resources_dir, shlib_name)]


class JitModuleInvoker:
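For context, CMake names a shared library `libFoo.dylib` on macOS and `libFoo.so` on Linux, which is why the resource lookup above has to branch on `platform.system()`. A standalone sketch of the same selection follows; the Windows branch is purely illustrative and is not part of the npcomp backend.

```python
import platform


def native_shlib_suffix() -> str:
  """Map platform.system() to the native shared-library suffix."""
  system = platform.system()
  if system == "Darwin":      # macOS
    return ".dylib"
  if system == "Windows":     # illustrative only; not handled by refjit.py
    return ".dll"
  return ".so"                # Linux and other Unix-likes


if __name__ == "__main__":
  # Prints e.g. "libNPCOMPCompilerRuntimeShlib.dylib" on macOS.
  print("libNPCOMPCompilerRuntimeShlib" + native_shlib_suffix())
```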
python/npcomp/types.py: 64 changes (32 additions & 32 deletions)
@@ -65,7 +65,7 @@ class _LiterateEnum(Enum):
Traceback (most recent call last):
...
ValueError: Cannot parse SampleEnum 1.0
"""

@classmethod
@@ -111,11 +111,11 @@ class TypeClass(_LiterateEnum):

class ValueType:
"""The type a value can take in the npcomp language.
Types of values in npcomp are always being refined and are therefore
- mutable. Instances represent the type derived for a single value, not a
+ mutable. Instances represent the type derived for a single value, not a
concept of "typeness" generally.
>>> ValueType()
Any
>>> ValueType('NdArray')
@@ -166,7 +166,7 @@ def constraints(self):

class ValueTypeList:
"""Models a list of ValueTypes.
>>> v3 = ValueTypeList(3)
>>> v3
(Any, Any, Any)
@@ -178,7 +178,7 @@ class ValueTypeList:
>>> v3[2] += Rank(2)
>>> v3
(Any, Any, NdArray[Rank(2)])
With names:
>>> v3 = ValueTypeList(3, [None, "b", None])
>>> v3[1] = 'NdArray'
@@ -221,11 +221,11 @@ def __repr__(self):

class Signature:
"""A function signature.
This currently only models a linear list of positional arguments and
assumes that multiple results will be represented by some form of tuple
type.
>>> Signature()
() -> Any
>>> Signature(2)
@@ -279,7 +279,7 @@ def __repr__(self):

class ArrayParams:
"""Represents parameters defining how to construct an array.
>>> ArrayParams()
ArrayParams(dtype=Unspec)
>>> ArrayParams(np.float32)
@@ -309,34 +309,34 @@ def rank(self):
@classmethod
def from_constraints(cls, constraints):
"""Constructs params for a TypeConstraints list.
Unconstrained:
>>> ArrayParams.from_constraints(TypeConstraints())
ArrayParams(dtype=Unspec)
DType constrained:
>>> ArrayParams.from_constraints(TypeConstraints(DType(np.float32)))
ArrayParams(dtype=float32)
- Rank constrained:
+ Rank constrained:
>>> ArrayParams.from_constraints(TypeConstraints(Rank(2)))
ArrayParams(dtype=Unspec, shape=(-1, -1))
Shape constrained:
>>> ArrayParams.from_constraints(TypeConstraints(Shape(1, 2, 3)))
ArrayParams(dtype=Unspec, shape=(1, 2, 3))
>>> ArrayParams.from_constraints(TypeConstraints(
... Rank(3), Shape(1, 2, 3)))
ArrayParams(dtype=Unspec, shape=(1, 2, 3))
Shape constrained with dynamic dim constraint:
>>> ArrayParams.from_constraints(TypeConstraints(
... Shape(1, 2, 3), DynamicDim(1)))
ArrayParams(dtype=Unspec, shape=(1, -1, 3))
>>> ArrayParams.from_constraints(TypeConstraints(
... Shape(1, 2, 3), DynamicDim((0, 2))))
ArrayParams(dtype=Unspec, shape=(-1, 2, -1))
Errors:
>>> ArrayParams.from_constraints(TypeConstraints(
... Rank(4), Shape(1, 2, 3)))
@@ -346,7 +346,7 @@ def from_constraints(cls, constraints):
>>> ArrayParams.from_constraints(TypeConstraints(
... Shape(1, 2, 3), DynamicDim((0, 5))))
Traceback (most recent call last):
- ...
+ ...
ValueError: Out of range DimFlag(Dynamic, (0, 5)) for shape [-1, 2, 3]
"""
# TODO: Should have a 'canonicalize' method on TypeConstraints which
@@ -395,7 +395,7 @@ def __repr__(self):
@property
def is_concrete(self):
"""Returns true if the parameters are sufficient to construct an ndarray.
>>> ArrayParams().is_concrete
False
>>> ArrayParams(dtype=np.float32).is_concrete
@@ -417,26 +417,26 @@ def is_concrete(self):
def mlir_tensor_type_asm(self):
"""Get a corresponding MLIR tensor type.
- Fully Unspecified:
+ Fully Unspecified:
>>> ArrayParams().mlir_tensor_type_asm
'tensor<*x!numpy.any_dtype>'
Unranked:
>>> ArrayParams(dtype=np.float32).mlir_tensor_type_asm
'tensor<*xf32>'
Ranked:
>>> ArrayParams(dtype=np.float32, rank=3).mlir_tensor_type_asm
'tensor<?x?x?xf32>'
>>> ArrayParams(dtype=np.float32, shape=(-1, -1)).mlir_tensor_type_asm
'tensor<?x?xf32>'
Scalar:
>>> ArrayParams(dtype=np.float32, rank=0).mlir_tensor_type_asm
'tensor<f32>'
>>> ArrayParams(dtype=np.float32, shape=()).mlir_tensor_type_asm
'tensor<f32>'
Shaped:
>>> ArrayParams(dtype=np.float32, shape=(2, 3)).mlir_tensor_type_asm
'tensor<2x3xf32>'
@@ -460,12 +460,12 @@ def mlir_tensor_type_asm(self):

def new_ndarray(self):
"""Creates a new ndarray from these params.
>>> ArrayParams().new_ndarray()
Traceback (most recent call last):
...
ValueError: ArrayParams(dtype=Unspec) is not concrete
>>> ArrayParams(np.float32, (1, 2)).new_ndarray() * 0.0
>>> (ArrayParams(np.float32, (1, 2)).new_ndarray() * 0.0 + 1.0) * 0.0
array([[0., 0.]], dtype=float32)
"""
if not self.is_concrete:
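The doctest above is the fragile one from the commit message: `new_ndarray()` hands back uninitialized storage, and any negative value in that storage multiplied by `0.0` produces IEEE-754 negative zero, which NumPy prints as `-0.` and which the commit reports seeing on macOS. Feeding the product through `+ 1.0` and a second `* 0.0` always lands on positive zero for finite inputs. A small standalone demonstration, separate from the doctest itself:

```python
import numpy as np

# A stand-in for uninitialized memory that happens to contain a negative value.
garbage = np.array([[-3.7, 2.1]], dtype=np.float32)

print(garbage * 0.0)                # [[-0.  0.]] -- the zero inherits the operand's sign
print((garbage * 0.0 + 1.0) * 0.0)  # [[0. 0.]]   -- (-0.0 + 1.0) == 1.0, and 1.0 * 0.0 == +0.0
```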
@@ -480,7 +480,7 @@ class TypeConstraint:

class TypeConstraints(list):
"""Collection of type constraints.
>>> TypeConstraints([DynamicDim()])
TypeConstraints(DimFlag(Dynamic, Unspec))
>>> TypeConstraints([DynamicDim(), Rank(4)])
@@ -554,9 +554,9 @@ def dim_flag(self):

class DType(ArrayConstraint):
"""A constraint on a dtype.
DType constraints are exclusive with only one permitted in a set.
>>> DType(np.float32)
DType(float32)
>>> DType("foobar")
@@ -597,7 +597,7 @@ class Rank(ArrayConstraint):
Traceback (most recent call last):
...
AssertionError
"""
__slots__ = ["_rank"]

@@ -619,9 +619,9 @@ def __repr__(self):

class Shape(ArrayConstraint):
"""Establishes a static shape for an array.
All dimensions must be a non-negative integer or Unspec.
>>> Shape(1, 2, 3)
Shape(1, 2, 3)
>>> Shape(Unspec, 1)
@@ -665,9 +665,9 @@ class DimFlagEnum(_LiterateEnum):

class DimFlag(ArrayConstraint):
"""Generic flag applying to one or more dimensions.
If dims is Unspec, the flag applies to all dims.
>>> DimFlag("Dynamic")
DimFlag(Dynamic, Unspec)
>>> DimFlag("Dynamic", 1)
