|
| 1 | +import numbers |
| 2 | +from mindspore import ops |
| 3 | +from mindspore.ops.auto_generate import gen_ops_prim |
| 4 | +from mindspore.ops.auto_generate import pyboost_inner_prim |
| 5 | +from mindspore._c_expression import _empty_instance |
| 6 | + |
| 7 | +from mindnlp import core |
| 8 | +from mindnlp.core._C import default_generator |
| 9 | + |
# Collect every auto-generated primitive ("*_op") exposed by gen_ops_prim and
# re-export a device-bound instance under the bare name (e.g. "add_op" -> "add").
op_list = list(filter(lambda s: s.endswith("_op"), dir(gen_ops_prim)))

__all__ = []

for op_name in op_list:
    # Strip only the trailing "_op" suffix. str.replace('_op', '') would also
    # remove "_op" occurring in the middle of a name and produce a wrong alias.
    func_name = op_name[:-len('_op')]
    __all__.append(func_name)
    # Instantiate a fresh primitive of the same class and pin it to Ascend.
    globals()[func_name] = getattr(gen_ops_prim, op_name).__class__().set_device('Ascend')
| 18 | + |
def empty(*call_args, **call_kwargs):
    """Create an uninitialized tensor on the Ascend device via _empty_instance."""
    return _empty_instance(*call_args, **call_kwargs, device='Ascend')
| 21 | + |
def reduce_any(input, dim, keepdim):
    """Logical-OR reduction over `dim`; dim=None reduces over all axes."""
    axes = () if dim is None else dim
    return pyboost_inner_prim.reduce_any_impl(input, axes, keepdim)

__all__.append('reduce_any')
| 28 | + |
def reduce_all(input, dim, keepdim):
    """Logical-AND reduction over `dim`; dim=None reduces over all axes."""
    axes = () if dim is None else dim
    return pyboost_inner_prim.reduce_all_impl(input, axes, keepdim)

__all__.append('reduce_all')
| 35 | + |
broadcast_to_op = ops.Primitive('BroadcastTo').set_device('Ascend')


def broadcast_to(*inputs):
    """Forward to the Ascend-bound BroadcastTo primitive."""
    return broadcast_to_op(*inputs)


__all__.append('broadcast_to')
| 41 | + |
cast_op = ops.Cast().set_device('Ascend')


def cast(*inputs):
    """Forward to the Ascend-bound Cast primitive."""
    return cast_op(*inputs)


__all__.append('cast')
| 47 | + |
zeros_op = ops.Zeros().set_device('Ascend')


def zeros(*inputs):
    """Forward to the Ascend-bound Zeros primitive."""
    return zeros_op(*inputs)


__all__.append('zeros')
| 53 | + |
def softmax(*inputs):
    """Forward to the pyboost softmax implementation."""
    return pyboost_inner_prim.softmax_impl(*inputs)


__all__.append('softmax')
| 58 | + |
def dropout_ext(input, p):
    """Dropout with probability `p`, drawing RNG state from the default generator."""
    seed_val, offset_val = default_generator._step(12)  # pylint: disable=protected-access
    return gen_ops_prim.dropout_ext_op(input, p, seed_val, offset_val)
| 62 | + |
def squeeze(*inputs):
    """Forward to the pyboost squeeze implementation."""
    return pyboost_inner_prim.squeeze_impl(*inputs)


__all__.append('squeeze')
| 67 | + |
ones_op = ops.Ones().set_device('Ascend')


def ones(*inputs):
    """Forward to the Ascend-bound Ones primitive."""
    return ones_op(*inputs)


__all__.append('ones')
| 73 | + |
def nllloss(*inputs):
    """Forward to the pyboost NLL-loss implementation."""
    return pyboost_inner_prim.nllloss_impl(*inputs)


__all__.append('nllloss')
| 78 | + |
def repeat_elements(*inputs):
    """Forward to mindspore.ops.repeat_elements."""
    return ops.repeat_elements(*inputs)


__all__.append('repeat_elements')
| 83 | + |
def concat(*inputs):
    """Forward to the pyboost concat implementation."""
    return pyboost_inner_prim.concat_impl(*inputs)


__all__.append('concat')
| 88 | + |
def multinomial_ext(input, num_samples, replacement, generator):
    """Sample `num_samples` indices from `input`, using `generator` for RNG state."""
    seed_val, offset_val = generator._step(12)  # pylint: disable=protected-access
    return gen_ops_prim.multinomial_ext_op(input, num_samples, replacement, seed_val, offset_val)
| 92 | + |
def isclose(*inputs):
    """Forward to the pyboost isclose implementation."""
    return pyboost_inner_prim.isclose_impl(*inputs)


__all__.append('isclose')
| 97 | + |
tile_op = ops.Primitive('Tile').set_device('Ascend')


def tile(*inputs):
    """Forward to the Ascend-bound Tile primitive."""
    return tile_op(*inputs)


__all__.append('tile')
| 103 | + |
# One cached PadV3 primitive per mode — the original rebuilt the primitive on
# every call, which is pure construction overhead since mode fully determines it.
_pad_v3_ops = {}


def pad_v3(input_x, padding, mode='constant', value=None):
    """Pad `input_x` with the PadV3 primitive.

    Args:
        input_x: tensor to pad.
        padding: padding sizes (paddings_contiguous layout).
        mode: PadV3 padding mode, e.g. 'constant'.
        value: fill value; a bare int/float is promoted to a tensor with
            input_x's dtype before being passed to the primitive.

    Returns:
        The padded tensor.
    """
    pad_op = _pad_v3_ops.get(mode)
    if pad_op is None:
        # NOTE(review): bound to CPU unlike every other op in this module —
        # presumably an Ascend limitation workaround; confirm before changing.
        pad_op = ops.PadV3(mode=mode, paddings_contiguous=True).set_device('CPU')
        _pad_v3_ops[mode] = pad_op
    if isinstance(value, (float, int)):
        value = core.tensor(value, dtype=input_x.dtype)
    return pad_op(input_x, padding, value)


__all__.append('pad_v3')
| 111 | + |
def inplace_uniform(input, from_, to_, generator_):
    """In-place uniform fill of `input` over [from_, to_), using generator_ state."""
    seed_val, offset_val = generator_._step(12)  # pylint: disable=protected-access
    return gen_ops_prim.inplace_uniform_op(input, from_, to_, seed_val, offset_val)
| 115 | + |
def binary_cross_entropy_with_logits(*inputs):
    """Forward to the pyboost BCE-with-logits implementation."""
    return pyboost_inner_prim.binary_cross_entropy_with_logits_impl(*inputs)


__all__.append('binary_cross_entropy_with_logits')
| 120 | + |
def gather(input_params, input_indices, axis, batch_dims=0):
    """Gather slices of `input_params` along `axis` at `input_indices`.

    Thin pass-through to mindspore.ops.gather; `batch_dims` defaults to 0.
    """
    return ops.gather(input_params, input_indices, axis, batch_dims)


__all__.append('gather')
| 125 | + |
def randint(low, high, shape, dtype, generator):
    """Random integers in [low, high) of the given shape/dtype, using `generator` state."""
    seed_val, offset_val = generator._step(12)  # pylint: disable=protected-access
    return gen_ops_prim.randint_op(low, high, shape, seed_val, offset_val, dtype)
| 129 | + |
def stack_ext(*inputs):
    """Forward to the pyboost stack implementation."""
    return pyboost_inner_prim.stack_ext_impl(*inputs)


__all__.append('stack_ext')
| 134 | + |
def argmax_with_value(*inputs):
    """Forward to the pyboost argmax-with-value implementation."""
    return pyboost_inner_prim.argmax_with_value_impl(*inputs)


__all__.append('argmax_with_value')
| 139 | + |
right_shift_op = ops.RightShift().set_device('Ascend')


def right_shift(input, other):
    """Elementwise right shift; a scalar `other` is promoted to a tensor of input's dtype."""
    shift = core.Tensor(other, input.dtype) if isinstance(other, numbers.Number) else other
    return right_shift_op(input, shift)
| 145 | + |
tensor_mul = ops.Mul().set_device('Ascend')
tensor_pow = ops.Pow().set_device('Ascend')


def ldexp(input, other):
    """Compute input * 2**other ("load exponent")."""
    return tensor_mul(input, tensor_pow(2.0, other))


__all__.append('ldexp')
| 153 | + |
def reverse_v2(input, dims):
    """Reverse `input` along the given axis or axes; an int `dims` is wrapped in a tuple."""
    axes = (dims,) if isinstance(dims, int) else dims
    return pyboost_inner_prim.reverse_v2_impl(input, axes)


__all__.append('reverse_v2')
0 commit comments