| text (string, lengths 1–2.05k) |
|---|
use regex::Regex;
use std::fs;
use std::path::Path; |
fn main() {
let trait_path = "src/operators/tensor/core.cairo";
let doc_path = "docs/framework/operators/tensor";
let label = "tensor";
let trait_name = "TensorTrait";
doc_trait(trait_path, doc_path, label);
doc_functions(trait_path, doc_path, trait_name, label);
let trait_path = ... |
ble_regressor";
let trait_name: &str = "TreeEnsembleRegressorTrait";
doc_trait(trait_path, doc_path, label);
doc_functions(trait_path, doc_path, trait_name, label);
let trait_path = "src/operators/ml/linear/linear_regressor.cairo";
let doc_path = "docs/framework/operators/machine-learning/line... |
fn doc_trait(trait_path: &str, doc_path: &str, label: &str) {
let path_str = format!("../{}", trait_path);
let path = Path::new(&path_str);
let contents = fs::read_to_string(&path).expect("Could not read the file");
let re = Regex::new(r
let mut table = String::from("| function | de... |
fn doc_functions(trait_path: &str, doc_path: &str, trait_name: &str, label: &str) {
let filepath_str = format!("../{}", trait_path);
let filepath = Path::new(&filepath_str);
let contents = fs::read_to_string(filepath).expect("Something went wrong reading the file");
let trait_re = Regex::new(&form... |
import os
from pathlib |
import Path
BASE_PATH = "./tests/nodes"
class ModFile:
def __init__(self):
"""
Initialize a ModFile object.
This method creates a new file with a .cairo extension in the BASE_PATH directory.
If the directory doesn't exist, it's created. The contents of the file are then read
... |
class CairoTest(File):
def __init__(self, file: str):
super().__init__(os.path.join(BASE_PATH, file))
@classmethod
def base_template(
cls, name: str, arg_cnt: int, refs: list[str], func_sig: str, out_cnt: int = 1
) -> list[str]:
"""
Create a template for a Cairo test fun... |
Cairo test function which expects a tensor sequence.
Args:
name (str): Name of the test function.
arg_cnt (int): Number of arguments for the function.
refs (list[str]): List of references (modules) to be used in the function.
func_sig (str): The function signatur... |
class CairoData(File):
def __init__(self, file: str):
super().__init__(os.path.join(BASE_PATH, file))
@classmethod
def base_template(
cls, func: str, dtype: str, refs: list[str], data: list[str], shape: tuple
) -> list[str]:
"""
Create a base template for data representa... |
Returns:
list[str]: A list of strings that together form the template of a sequence tensor function in Cairo.
This method generates a list of strings representing a function in Cairo for handling a sequence
of tensors, each with its own data and shape.
"""
def expand_sequen... |
from enum |
import Enum |
import os
from typing |
import List
from .file_manager |
import CairoTest, CairoData, ModFile |
import numpy as np |
class FixedImpl(Enum):
FP8x23 = 'FP8x23'
FP16x16 = 'FP16x16'
FP32x32 = 'FP32x32'
def to_fp(x: np.ndarray, fp_impl: FixedImpl):
match fp_impl:
case FixedImpl.FP8x23:
return (x * 2**23).astype(np.int64)
case FixedImpl.FP16x16:
return (x * 2**16).astype(np.int... |
class Dtype(Enum):
FP8x23 = 'FP8x23'
FP16x16 = 'FP16x16'
FP32x32 = 'FP32x32'
I8 = 'i8'
I32 = 'i32'
U32 = 'u32'
BOOL = 'bool'
COMPLEX64 = 'complex64'
class Tensor:
def __init__(self, dtype: Dtype, shape: tuple, data: np.ndarray):
self.dtype = dtype
self.shape = shape... |
class Trait(Enum):
TENSOR = 'TENSOR'
NN = 'NN'
SEQUENCE = 'SEQUENCE'
def make_test(inputs: list[Tensor | Sequence], output: Tensor | Sequence, func_sig: str, name: str, trait: Trait = Trait.TENSOR):
"""
Generate and write Cairo tests based on the provided inputs and output.
Args:
inpu... |
x.shape for x in output],
)
output_data.dump()
case tuple():
for i, out in enumerate(output):
output_data = CairoData(
os.path.join(name, f"output_{i}.cairo"))
output_data.buffer = CairoData.base_template(
... |
ef[Trait.TENSOR],
*dtype_to_tensor[dtype],
*dtype_to_numbers[dtype],
]
return refs
def get_data_statement(data: np.ndarray, dtype: Dtype) -> list[str]:
match dtype:
case Dtype.U32:
return [f"{int(x)}" for x in data.flatten()]
case Dtype.I32:
return ... |
"orion::utils::{assert_eq, assert_seq_eq}",
]
return refs
def find_all_types(tensors: list[Tensor | Sequence]) -> list[Dtype]:
dtypes = []
for tensor in tensors:
if isinstance(tensor, list) or isinstance(tensor, tuple):
dtypes += [x.dtype for x in tensor]
else:
... |
ators::tensor::U32TensorPartialEq",],
Dtype.I32: ["orion::operators::tensor::I32TensorPartialEq",],
Dtype.I8: ["orion::operators::tensor::I8TensorPartialEq",],
Dtype.FP8x23: ["orion::operators::tensor::FP8x23TensorPartialEq",],
Dtype.FP16x16: ["orion::operators::tensor::FP16x16TensorPartialEq",],
Dt... |
import argparse
import importlib
import os
import sys
class RunAll:
@classmethod
def run_all(cls):
for method_name in dir(cls):
if method_name.startswith('__') or method_name == 'run_all':
continue
method = getattr(cls, method_name)
if callable(metho... |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Abs(RunAll):
@staticmethod
def abs_i32():
x = np.random.randint(-127, 127, (2, 2)).astype(np.int32)
y = abs(x)
x = Tensor(Dtype.I32, x.shape, x.flatten())
... |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Acos(RunAll):
@staticmethod
def acos_fp8x23():
x = np.random.uniform(-1, 1, (2, 2)).astype(np.float64)
y = np.arccos(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(... |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Acosh(RunAll):
@staticmethod
def acosh_fp8x23():
x = np.random.uniform(1, 5, (2, 2)).astype(np.float64)
y = np.arccosh(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
... |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
import make_test, to_fp, Tensor, Dtype, FixedImpl |
class Add(RunAll):
@staticmethod
def add_u32():
def default():
x = np.random.randint(0, 3, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 3, (3, 3, 3)).astype(np.uint32)
z = x + y
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(D... |
dom.randint(-3, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
z = x + y
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
name ... |
make_test([x, y], z, "input_0 + input_1", name)
default()
broadcast()
@staticmethod
def add_fp16x16():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = x + y
... |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class And(RunAll):
@staticmethod
def and_bool():
def default():
x = (np.random.randn(3, 4) > 0).astype(bool)
y = (np.random.randn(3, 4) > 0).astype(bool)
... |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
import make_test, to_fp, Tensor, Dtype, FixedImpl
def argmax_use_numpy(data: np.ndarray, axis: int = 0, keepdims: int = 1) -> np.ndarray:
result = np.argmax(data, axis=axis)
if keepdims == 1:
result = np.expand_dims(result, axis)
return result.astype(np.int64)
def argmax_use_numpy_select_last_in... |
class Argmax(RunAll):
@staticmethod
def no_keepdims():
data = np.array([[2, 1], [3, 10]], dtype=np.float32)
axis = 1
keepdims = 0
result = argmax_use_numpy(data, axis=axis, keepdims=keepdims)
x = Tensor(Dtype.FP16x16, data.shape, data.flatten())
y = Tensor(Dtype... |
e=np.float32)
axis = 1
keepdims = 0
result = argmax_use_numpy_select_last_index(
data, axis=axis, keepdims=keepdims)
x = Tensor(Dtype.FP16x16, data.shape, data.flatten())
y = Tensor(Dtype.I32, result.shape, result.flatten())
name = "argmax_no_keepdims_select... |
rue))", name) |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
import make_test, to_fp, Tensor, Dtype, FixedImpl
def argmin_use_numpy(data: np.ndarray, axis: int = 0, keepdims: int = 1, dtype=np.int64) -> np.ndarray:
result = np.argmin(data, axis=axis)
if keepdims == 1:
result = np.expand_dims(result, axis)
return result.astype(dtype)
def argmin_use_numpy_s... |
class Argmin(RunAll):
@staticmethod
def argmin_u32():
def argmin_1D():
def default_params():
x = np.random.randint(0, 255, (3)).astype(np.uint32)
y = argmin_use_numpy(x, dtype=np.uint32).reshape((1))
x = Tensor(Dtype.U32, x.shape, x.flatten()... |
[x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)
def keepdims_false():
x = np.random.randint(0, 255, (2, 2)).astype(np.uint32)
y = argmin_use_numpy(
x, keepdims=0, dtype=np.uint32)
x = Tensor(Dtype.U32, x.shape, x.fl... |
e_test(
[x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)
def last_index():
x = np.random.randint(0, 255, (2, 2, 2)).astype(np.uint32)
y = argmin_use_numpy_select_last_index(
x, dtype=np.uint32)
... |
, y.flatten())
name = "argmin_i32_1D_last_index"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)
default_params()
keepdims_false()
last_index()
argmin_1D()
def argmin_2D():
... |
nt32)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i32_3D_default"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)
def keepdims_f... |
use_numpy(
x, keepdims=0, dtype=np.uint32).reshape((1))
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i8_1D_keepdims_false"
make_test(
[x], y, "input_0.argmin... |
y = argmin_use_numpy_select_last_index(
x, dtype=np.int8)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i8_2D_last_index"
make_test(
[x], y, "input_0.argmin(0... |
n_3D()
@staticmethod
def argmin_fp16x16():
def argmin_1D():
def default_params():
x = to_fp(np.random.randint(-127, 127, (3)
).astype(np.int8), FixedImpl.FP16x16)
y = argmin_use_numpy(x, dtype=np.uint32).reshape((1)... |
dImpl.FP16x16)
y = argmin_use_numpy(x, dtype=np.uint32)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp16x16_2D_default"
make_test(
[x], y, "input_0.argmin(... |
fault"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)
def keepdims_false():
x = to_fp(np.random.randint(-127, 127, (2, 2, 2)
).astype(np.int8), FixedImpl.FP16x16)
... |
x = to_fp(np.random.randint(-127, 127, (3)
).astype(np.int8), FixedImpl.FP8x23)
y = argmin_use_numpy(
x, keepdims=0, dtype=np.uint32).reshape((1))
x = Tensor(Dtype.FP8x23, x.shape,
x.flatten())
... |
eepdims=0, dtype=np.uint32)
x = Tensor(Dtype.FP8x23, x.shape,
x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp8x23_2D_keepdims_false"
make_test(
[x], y, "input_0.argmin(0, Option::S... |
.flatten())
name = "argmin_fp8x23_3D_keepdims_false"
make_test(
[x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)
def last_index():
x = to_fp(np.random.randint(-127, 127, (2, 2, 2)
... |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
import make_test, to_fp, Tensor, Dtype, FixedImpl |
class Array_feature_extractor(RunAll):
@staticmethod
def array_feature_extractor_3D():
def array_feature_extractor_i32():
x = np.random.randint(-3, 3, (2, 3, 4)).astype(np.int32)
y = np.array([1, 3]).astype(np.uint32)
z = (x[..., y])
x = Tensor(Dtype.I32... |
def array_feature_extractor_i32():
x = np.random.randint(-3, 3, (3, 4)).astype(np.int32)
y = np.array([1, 3]).astype(np.uint32)
z = (x[..., y])
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtyp... |
stype(np.int32)
y = np.array([1, 3]).astype(np.uint32)
z = (x[..., y])
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "array_feature_extractor_1D_i32"
... |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Asin(RunAll):
@staticmethod
def asin_fp8x23():
x = np.random.uniform(-1, 1, (2, 2)).astype(np.float64)
y = np.arcsin(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(... |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Asinh(RunAll):
@staticmethod
def asinh_fp8x23():
x = np.random.uniform(1, 5, (2, 2)).astype(np.float64)
y = np.arcsinh(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(... |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Atan(RunAll):
@staticmethod
def atan_fp8x23():
x = np.random.uniform(-10, 127, (2, 2)).astype(np.float64)
y = np.arctan(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(... |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_node, make_test, to_fp, Tensor, Dtype, FixedImpl
class Binarizer(RunAll):
@staticmethod
def binarizer_fp8x23():
x = np.random.uniform(-3, 3, (3, 3, 3)).astype(np.float64)
threshold = np.float64(1)
y = (x > t... |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait, get_data_statement
def blackman_window(size, output_datatype=None, periodic=None) -> np.ndarray:
if periodic == 1:
N_1 = size
else:
N_1 = size - 1
ni = np.arange(size, dtype=output_datatype)
alpha = 0.42
beta = 0.08
y =... |
class Blackman_window(RunAll):
@staticmethod
def fp8x23():
args = [3]
args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP8x23), Dtype.FP8x23)
y = blackman_window(*args, np.float64)
y = Tensor(Dtype.FP8x23, y.shape, to_f... |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Ceil(RunAll):
@staticmethod
def ceil_fp8x23():
x = np.random.uniform(-1, 1, (2, 2)).astype(np.float64)
y = np.ceil(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.... |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
import make_test, to_fp, Tensor, Dtype, FixedImpl |
class Clip(RunAll):
@staticmethod
def clip_u32():
def clip_2D():
x = np.random.randint(0, 255, (2, 4)).astype(np.uint32)
y = np.clip(x, np.uint32(10), np.uint32(20))
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
... |
.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "clip_i8_2d"
make_test(
[x], y, "input_0.clip(Option::Some(-10_i8), Option::Some(20_i8))", name)
def clip_3D():
x = np.random.randint(-127, 127, (20, 10, 5)).astype(np.int8)
... |
.random.randint(-127, 127, (2, 4)
).astype(np.int64), FixedImpl.FP16x16)
y = np.clip(x, to_fp(np.int64(-10), FixedImpl.FP16x16), to_fp(np.int64(20), FixedImpl.FP16x16))
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, ... |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
def col2im(data, image_shape, block_shape, dilations=None, pads=None, strides=None):
if dilations is None:
dilations = [1 for s in image_shape]
if pads is None:
pads = [0 for s in image_shape] * 2
if strides is None:
stride... |
!= block_size:
raise ValueError(
f"Given n_input_plane={n_input_plane}, X.shape={X.shape}, "
f"output_shape={output_shape}, kernel_shape={kernel_shape}, "
f"dilations={dilations}, pads={pads}, strides={strides}, "
f"expected size of input's dimension 2 to match th... |
class Col2im(RunAll):
@staticmethod
def export_col2im() -> None:
x = np.array(
[
[
[1.0, 6.0, 11.0, 16.0, 21.0],
[2.0, 7.0, 12.0, 17.0, 22.0],
[3.0, 8.0, 13.0, 18.0, 23.0],
[4.0, 9.0, 14.0, 19.... |
y.flatten(), FixedImpl.FP16x16))
name = "col2im_strides"
func_sig = "NNTrait::col2im("
func_sig += "@input_0,"
func_sig += "array![5, 5].span(),"
func_sig += "array![3, 3].span(),"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Optio... |
ticmethod
def export_col2im_dilations() -> None:
x = np.array(
[
[
[1.0, 5.0, 9.0, 13.0, 17],
[2.0, 6.0, 10.0, 14.0, 18],
[3.0, 7.0, 11.0, 15.0, 19],
[4.0, 8.0, 12.0, 16.0, 20],
]
... |
).astype(np.int64)
block_shape = np.array([1, 1, 5]).astype(np.int64)
y = col2im(x,image_shape,block_shape)
y = np.array(y[0])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))... |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait |
class Compress(RunAll):
@staticmethod
def compress_fp16x16():
def compress_3D():
def default():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
x2 = np.array([0, 1, 1]).astype(np.uint32)
y = x1.compress(x2, axis=0)
... |
_fp16x16_3d_axis2"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))",
name= name)
def axis3():
x1 = np.arange(0,96).reshape(4,3,4, 2).astype(np.int64)
... |
.compress(x2, axis=0)
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
name = "compress_fp8x23_3d_default"
... |
def compress_3D():
def default():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int8)
x2 = np.array([0, 1, 1]).astype(np.uint8)
y = x1.compress(x2, axis=0)
x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x... |
:
def compress_3D():
def default():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int32)
x2 = np.array([0, 1, 1]).astype(np.int32)
y = x1.compress(x2, axis=0)
x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
... |
hod
def compress_u32():
def compress_3D():
def default():
x1 = np.arange(0,48).reshape(4,4,3).astype(np.uint32)
x2 = np.array([1, 1]).astype(np.uint32)
y = x1.compress(x2, axis=0)
x1 = Tensor(Dtype.U32, x1.shape, x1.f... |
e(3,4,5).astype(np.uint32)
x2 = np.array([0, 1, 1]).astype(np.uint32)
y = x1.compress(x2, axis=2)
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
... |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait |
class Concat(RunAll):
@staticmethod
def concat_u32():
def concat_1D():
x1 = np.arange(0,3).astype(np.uint32)
x2 = np.arange(3,6).astype(np.uint32)
y = np.concatenate((x1, x2))
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U... |
This dataset is a truncated version of this one, reformatted to be compatible with MLX-LoRA: each row is a single `{"text": "This is an example for the model."}` record, and each entry has been truncated along code boundaries (classes, functions, etc.) so that every entry stays under 2048 tokens.
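As a rough sketch of how the `{"text": ...}` rows might be consumed, the snippet below converts the dataset into the `train.jsonl` / `valid.jsonl` files that MLX-LoRA fine-tuning typically reads. The repository id, split size, and file names are placeholders for illustration, not part of this dataset card.

```python
# Minimal sketch (assumptions: the Hugging Face `datasets` library is installed,
# and "user/orion-mlx-lora" is a hypothetical placeholder for this dataset's repo id).
import json
from datasets import load_dataset

ds = load_dataset("user/orion-mlx-lora", split="train")  # placeholder repo id

# Each row is already a {"text": "..."} record under 2048 tokens, so we only
# split it and re-serialize to the JSONL layout expected by MLX-LoRA training.
split = ds.train_test_split(test_size=0.1, seed=42)
for name, part in [("train", split["train"]), ("valid", split["test"])]:
    with open(f"{name}.jsonl", "w") as f:
        for row in part:
            f.write(json.dumps({"text": row["text"]}) + "\n")
```

The resulting directory of JSONL files can then be pointed to by MLX-LM's LoRA training command; see the mlx-lm documentation for the exact invocation and flags.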