text stringlengths 1 2.05k |
|---|
pe(3,3,3)
x2 = np.arange(27,54).astype(np.uint32).reshape(3,3,3)
y = np.concatenate((x1, x2), axis=1)
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())... |
t= Trait.TENSOR)
def three_tensors_axis_2():
x1 = np.arange(0,27).astype(np.uint32).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.uint32).reshape(3,3,3)
x3 = np.arange(54,81).astype(np.uint32).reshape(3,3,3)
y = np.concatenat... |
= Tensor(Dtype.I32, y.shape, y.flatten())
name = "concat_i32_2d"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)",
name= name, trait= Trait.TENSOR)
def concat_3D():
de... |
"concat_i32_3d_axis_2"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 2)",
name= name, trait= Trait.TENSOR)
def three_tensors_axis_1():
x1 = np.arange(0,27).astype(np.int32).... |
concat_1D()
concat_2D()
concat_3D()
@staticmethod
def concat_i8():
def concat_1D():
x1 = np.arange(0,3).astype(np.int8)
x2 = np.arange(3,6).astype(np.int8)
y = np.concatenate((x1, x2))
x1 = Tensor(Dtype.FP8x23, x1.shape, x1.flatte... |
):
x1 = np.arange(0,27).astype(np.int8).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int8).reshape(3,3,3)
y = np.concatenate((x1, x2), axis=1)
x1 = Tensor(Dtype.FP8x23, x1.shape, x1.flatten())
x2 = Tensor(Dtype.FP8x23, x2.shape, x2.flatt... |
nput_0, input_1, input_2].span(), 1)",
name= name, trait= Trait.TENSOR)
def three_tensors_axis_2():
x1 = np.arange(0,27).astype(np.int8).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int8).reshape(3,3,3)
x3 = np.arange(54... |
x2 = np.arange(4,8).astype(np.int64).reshape(2,2)
y = np.concatenate((x1, x2), axis=0)
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(
x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.FP8x23, x2.shape, to_fp(
x2.flatten(), FixedImpl.FP8x23))
... |
fp8x23_3d_axis_1"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 1)",
name= name, trait= Trait.TENSOR)
def axis_2():
x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3)
... |
def three_tensors_axis_2():
x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3)
x3 = np.arange(54,81).astype(np.int64).reshape(3,3,3)
y = np.concatenate((x1, x2, x3), axis=2)
x1 = Ten... |
x1 = np.arange(0,4).astype(np.int64).reshape(2,2)
x2 = np.arange(4,8).astype(np.int64).reshape(2,2)
y = np.concatenate((x1, x2), axis=0)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(
x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.FP16x16, x2.shape, to_... |
hape ,to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "concat_fp16x16_3d_axis_1"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 1)",
name= name, trait= Trait.TENSOR)
... |
rray![input_0, input_1, input_2].span(), 1)",
name= name, trait= Trait.TENSOR)
def three_tensors_axis_2():
x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3)
x3 = np.... |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait |
class Concat_from_sequence(RunAll):
@staticmethod
def concat_from_sequence_u32():
def new_axis_zero():
sequence = []
values_array = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(0, 6, shape).astype(np... |
tensor = Tensor(Dtype.U32, values.shape, values.flatten())
sequence.append(tensor)
values_array.append(values)
axis = np.int32(1)
new_axis = np.uint32(0)
concatenated_tensor = np.concatenate(values_array, axis)
concatenated_tensor = Tenso... |
p.uint32(1)
concatenated_tensor = np.stack(values_array, axis)
concatenated_tensor = Tensor(Dtype.I32, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_i32_new_axis_one"
make_test([sequence], concatenated_tensor, "SequenceTrait::con... |
= "concat_from_sequence_i8_new_axis_zero"
make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(0))", name, Trait.SEQUENCE)
def new_axis_one():
sequence = []
values_array = []
shape = np.random.randint(1, 4, ... |
_zero():
sequence = []
values_array = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23... |
tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
sequence.append(tensor)
values_array.append(values)
axis = np.int32(1)
new_axis = np.uint32(0)
concatenated_tensor = np.concatenate(values_array, axis)
... |
es.flatten(), FixedImpl.FP16x16))
sequence.append(tensor)
values_array.append(values)
axis = np.int32(1)
new_axis = np.uint32(1)
concatenated_tensor = np.stack(values_array, axis)
concatenated_tensor = Tensor(Dtype.FP16x16, concatenated_t... |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait |
import numpy as np
def r_index_check(r_index, shape_out):
    """Check that every coordinate in r_index lies strictly inside shape_out.

    Returns False as soon as some r_index[i] >= shape_out[i]; True when all
    coordinates are in bounds (vacuously True for an empty r_index).
    """
    for pos, coord in enumerate(r_index):
        if coord >= shape_out[pos]:
            return False
    return True
def stride(arr):
stride = np.zeros(len(arr))
acc = 1
for i in range(len(arr)):
stride[i] = acc
acc *= arr[-(i + 1)... |
kernel_shape}, pads={pads}, "
f"strides={strides}."
) from e
if b == 0:
td += cv.shape[1]
res.append((b, cv))
new_shape = [X.shape[0], *list(res[0][1].shape[1:])]
new_shape[1] = td
final = np.zeros(... |
(pad_tail)
pads = head + tail
if len(X.shape) == 3:
sN, sC, sH = X.shape
(kh,) = kernel_shape
(sth,) = strides
h_out = int(((sH - kh + pads[0] + pads[1]) / sth) + 1)
h0 = pads[0]
oh = -1 * (kh % 2)
bh = -h0
eh = h_out * sth
... |
ads[2]) / sth) + 1)
w_out = int(((sW - kw + pads[1] + pads[3]) / stw) + 1)
h0, w0 = pads[0], pads[1]
oh, ow = -1 * (kh % 2), -1 * (kw % 2)
bh, bw = -h0, -w0
eh, ew = h_out * sth, w_out * stw
res = np.zeros((X.shape[0], W.shape[0], h_out, w_out))
if B is not Non... |
s = np.dot(img.reshape((1, -1)), w_.reshape((-1, 1)))[
0, 0
]
else:
s = np.dot(img.reshape((1, -1)), w.reshape((-1, 1)))[
0, 0
... |
continue
z = zo + kz % 2
iz1, iz2 = max(0, z + oz), min(z + oz + kz, sZ)
img = X[n : n + 1, c : c + 1, ih1:ih2, iw1:iw2, iz1:iz2]
... |
]
res[n, nw, hr, wr, zr] += s
return res
else:
nd = len(X.shape[2:])
sN, sC = X.shape[:2]
x_stride = stride(X.shape)
w_stride = stride(W.shape)
x_flatten = X.reshape(int(x_stride[0] * X.shape[0]))
... |
x]) for nx in range(nd)]
idiff_index = [int(i2_index[nx] - i1_index[nx]) for nx in range(nd - 1)]
i_stride = stride(idiff_index)
img = []
for ii in range(int(i_stride[0] * idiff_index... |
ndex) * kernel_shape[nx]
w_.append(w[int(start + j1_index[-1]):int(start + j1_index[-1] + j2_index[nd-1] - j1_index[nd-1])])
img = np.array(img)
... |
class Conv(RunAll):
@staticmethod
def export_conv_1D_no_padding() -> None:
x = np.array(
[
[
[
0.0, 1.0, 2.0, 3.0, 4.0
]
]
]
).astype(np.float32)
w = np.array(
... |
name = "conv_1D_with_padding"
func_sig = "NNTrait::conv("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
... |
name, Trait.NN)
@staticmethod
def export_con_2D_with_padding() -> None:
x = np.array(
[
[
[
[0.0, 1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.... |
, 26, 27, 28, 29],[ 30, 31, 32, 33, 34],[ 35, 36, 37, 38, 39],[ 40, 41, 42, 43, 44],[ 45, 46, 47, 48, 49]
],
[
[ 50, 51, 52, 53, 54],[ 55, 56, 57, 58, 59],[ 60, 61, 62, 63, 64],[ 65, 66, 67, 68, 69],[ 70, 71... |
func_sig += "Option::None)"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_conv_3D_with_padding() -> None:
x = np.array(
[
[
[
[
[ 0, 1, 2, 3,... |
6))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_3D_with_padding"
func_sig = "NNTrait::conv("
func_sig += "@input_0,"
func_sig += "@input_1,"
... |
[72, 73, 74],[75, 76, 77],[78, 79, 80]
]
]
]
]
]
).astype(np.float32)
w = np.array(
[
[
[
[
[
... |
,[12, 13, 14],[15, 16, 17]
],
[
[18, 19, 20],[21, 22, 23],[24, 25, 26]
]
],
[
[
[27, 28, 29],[30... |
6))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_4D_with_padding"
func_sig = "NNTrait::conv("
func_sig += "@input_0,"
func_sig += "@input_1,"
... |
ption::Some(AUTO_PAD::SAME_LOWER),"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::Some(array![3, 3].span()),"
func_sig += "Option::None,"
func_sig += "Option::Some(array![2, 2].span()))"
make_test(
[x, w], y, func_sig, name, Tra... |
e(array![2, 2].span()))"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_conv_with_strides_with_padding() -> None:
x = np.array(
[
[
[
[0.0, 1.0, 2.0, 3.0, 4.0],
... |
[
[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]],
[
[9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [15.0, 16.0, 17.0]]
]
]
).astype(np.float32)
w = np.array(
[
[
[... |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
def conv_transpose(
X,
W,
B=None,
auto_pad=None,
dilations=None,
group=None,
kernel_shape=None,
output_padding=None,
output_shape=None,
pads=None,
strides=None,
):
if dilations is None:
dilations = [1 for... |
= W.shape[1] * group
kernel_dim = num_output_channels
C = X.shape[1]
m = kernel_dim
n = np.prod(X.shape[2:])
k = C
w_reshaped = W.reshape((group, k, m))
final = None
if group == 1:
for image_id in range(X.shape[0]):
w_t = w_reshaped[0].T
... |
dices(i, shape):
res = np.empty((len(shape),), dtype=np.int64)
k = len(shape) - 1
while k > 0:
m = i % shape[k]
res[k] = m
i -= m
i /= shape[k]
k -= 1
res[0] = i
return res
def _col2im_shape_check(X, output_shape, kernel_shape, dilations, pads, strides):
... |
i, :].sum()
- (dilations[i] * (kernel_shape[i] - 1) + 1)
)
dim_col.append(col)
kernel_size = np.prod(kernel_shape)
col_size = np.prod(dim_col)
for c_col in range(kernel_size):
offset = _get_indices(c_col, kernel_shape)
for col in range(col_size):
... |
class Conv_transpose(RunAll):
@staticmethod
def export_conv_transpose() -> None:
x = np.array(
[[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]]
).astype(np.float32)
w = np.array(
[
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0],... |
ne,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None)"
make_test(
[x, w], y, func_sig, name, Tra... |
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
],
]
]
).astype(np.float32)
y = conv_transpose(x, w, group=1)[0]
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.fla... |
nc_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None)"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_convtranspose_pads() -> None:
x = np.... |
x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_transpose_dilations"
func_sig = "NNTrait::conv_transpose("
func_s... |
func_sig += "Option::Some(array![2, 2].span()))"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_convtranspose_group_2() -> None:
x = np.array(
[
[
[
[0.0, 1.0, 2.0], [3.0, 4.0,... |
[
[
[18.0, 19.0, 20.0], [21.0, 22.0, 23.0], [24.0, 25.0, 26.0]
],
[
[9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [15.0, 16.0, 17.0]
]
],
[
[
... |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Cos(RunAll):
@staticmethod
def cos_fp8x23():
x = np.random.uniform(-10, 127, (2, 2)).astype(np.float64)
y = np.cos(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
... |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Cosh(RunAll):
@staticmethod
def cosh_fp8x23():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
y = np.cosh(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
... |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
import make_test, to_fp, Tensor, Dtype, FixedImpl |
class Cumsum(RunAll):
@staticmethod
def cumsum_u32():
def cumsum_1D():
def default():
x = np.array([1, 2, 3, 4, 5]).astype(np.uint32)
y = np.array([1, 3, 6, 10, 15]).astype(np.uint32)
x = Tensor(Dtype.U32, x.shape, x.flatten())
... |
):
def axis_0():
x = np.array([1, 2, 3, 4, 5, 6]).astype(
np.uint32).reshape((2, 3))
y = np.array([1, 2, 3, 5, 7, 9]).astype(
np.uint32).reshape((2, 3))
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Te... |
me(false))", name)
def reverse():
x = np.array([1, 2, 3, 4, 5]).astype(np.int32)
y = np.array([15, 14, 12, 9, 5]).astype(np.int32)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name =... |
[x], y, "input_0.cumsum(1, Option::None(()), Option::None(()))", name)
axis_0()
axis_1()
cumsum_2D()
@staticmethod
def cumsum_i8():
def cumsum_1D():
def default():
x = np.array([1, 2, 3, 4, 5]).astype(np.int8)
y = np.array([1,... |
reverse()
reverse_exclusive()
cumsum_1D()
def cumsum_2D():
def axis_0():
x = np.array([1, 2, 3, 4, 5, 6]).astype(
np.int8).reshape((2, 3))
y = np.array([1, 2, 3, 5, 7, 9]).astype(
np.int8).reshape((2, 3))
... |
.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "cumsum_fp8x23_1d_exclusive"
make_test(
[x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(false))", name)
def reverse(... |
3_2d_axis_0"
make_test(
[x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name)
def axis_1():
x = np.array([1, 2, 3, 4, 5, 6]).astype(
np.int64).reshape((2, 3))
y = np.array([1, 3, 6, 4, 9, 15]).astype(
... |
1, 2, 3, 4, 5]).astype(np.int64)
y = np.array([15, 14, 12, 9, 5]).astype(np.int64)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16)... |
np.int64).reshape((2, 3))
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "cumsum_fp16x16_2d_axis_1"
make... |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
def depth_to_space(data: np.ndarray, blocksize: int = 2, mode = "DCR") -> np.ndarray:
if len(data.shape) != 4:
raise RuntimeError(f"Unexpected shape {data.shape!r}.")
b, c, h, w = data.shape
if mode == "DCR":
tmpshape = (
... |
class Depth_to_space(RunAll):
@staticmethod
def fp8x23():
x = np.random.uniform(-3, 3, (1, 4, 2, 2)).astype(np.float64)
y = depth_to_space(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
... |
en())
name = "depth_to_space_u32"
make_test([x], y, "NNTrait::depth_to_space(@input_0, 2, 'CRD')",
name, Trait.NN) |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
import make_test, to_fp, Tensor, Dtype, FixedImpl |
class Div(RunAll):
@staticmethod
def div_u32():
def default():
x = np.random.randint(3, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(1, 3, (3, 3, 3)).astype(np.uint32)
z = x / y
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(D... |
randint(1, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(1, 3, (3, 3, 3)).astype(np.int8)
z = x / y
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
name = "div... |
test([x, y], z, "input_0 / input_1", name)
default()
broadcast()
@staticmethod
def div_fp16x16():
def default():
x = np.random.randint(1, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(1, 3, (3, 3, 3)).astype(np.float64)
z = x / y
... |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
import make_test, to_fp, Tensor, Dtype, FixedImpl |
class Equal(RunAll):
@staticmethod
def equal_u32():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
z = np.equal(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
... |
def equal_i8():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
z = np.equal(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
... |
qual_fp8x23_broadcast"
make_test([x, y], z, "input_0.equal(@input_1)", name)
default()
broadcast()
@staticmethod
def equal_fp16x16():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).as... |
import numpy as np
from math import erf
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Erf(RunAll):
@staticmethod
def erf_fp8x23():
x = np.asarray([0.12, -1.66, 3.4, 4.8, 2.7]).astype(np.float64).reshape(1,5)
y = np.asarray([erf(value) f... |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Exp(RunAll):
@staticmethod
def exp_fp8x23():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
y = np.exp(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
... |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait |
class Gather(RunAll):
@staticmethod
def gather_fp16x16():
def default():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.uint32)
y = x1.take(x2, axis=0)
x1 = Tensor(Dtype.FP16x16, x1.shape... |
name= name)
def negative_indices():
x1 = np.arange(10).astype(np.float32)
x2 = np.array([0, -9, -10]).astype(np.int64)
y = np.take(x1, x2, axis=0)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.... |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.