|
| 1 | +import functools |
| 2 | + |
| 3 | +import ninetoothed |
| 4 | +import ninetoothed.language as ntl |
| 5 | +from ninetoothed import Tensor |
| 6 | +from ninetoothed.language import libdevice |
| 7 | + |
| 8 | +from ntops.kernels.element_wise import arrangement |
| 9 | + |
| 10 | + |
def application(input, output):
    """Element-wise rounding: store nearbyint(input) into *output*.

    NOTE(review): ninetoothed traces this function body; the assignment to
    the *output* parameter is the kernel's store, which is why the apparent
    dead store is suppressed with ``noqa: F841``.
    """
    # Cast to float32 before calling libdevice.nearbyint — presumably so
    # lower-precision dtypes (e.g. fp16/bf16 inputs) are accepted; TODO confirm.
    output = libdevice.nearbyint(ntl.cast(input, ntl.float32))  # noqa: F841
| 13 | + |
| 14 | + |
def application_with_decimals(input, factor, inv_factor, output):
    """Round *input* to a given number of decimals.

    Computes ``nearbyint(input * factor) * inv_factor`` element-wise.
    *factor* is presumably ``10**decimals`` and *inv_factor* its reciprocal,
    supplied by the caller as scalars (see ``premake``) — TODO confirm.

    NOTE(review): ninetoothed traces this body; the assignment to *output*
    is the kernel's store (hence ``noqa: F841``).
    """
    scaled = input * ntl.cast(
        factor, input.dtype
    )  # multiply in input's original precision, matching torch behavior
    # Cast to float32 so libdevice.nearbyint accepts the scaled value.
    output = libdevice.nearbyint(ntl.cast(scaled, ntl.float32)) * inv_factor  # noqa: F841
| 20 | + |
| 21 | + |
def premake(ndim, decimals=0, dtype=None, block_size=None):
    """Assemble the (arrangement, application, tensors) triple for a round kernel.

    Args:
        ndim: Rank of the input/output tensors.
        decimals: Number of decimal places to round to; ``0`` selects the
            plain rounding application, anything else selects the
            decimals-aware one.
        dtype: Element dtype of the input/output tensors (``None`` = default).
        block_size: Forwarded to ``arrangement`` via ``functools.partial``.

    Returns:
        A tuple ``(arrangement_fn, application_fn, tensors)`` suitable for
        kernel construction.
    """
    bound_arrangement = functools.partial(arrangement, block_size=block_size)

    if decimals == 0:
        # Plain rounding needs only the input and output tensors.
        io_pair = (Tensor(ndim, dtype=dtype), Tensor(ndim, dtype=dtype))
        return bound_arrangement, application, io_pair

    # Decimals-aware rounding additionally takes the scale factor and its
    # inverse as float64 scalars (rank-0 tensors).
    signature = (
        Tensor(ndim, dtype=dtype),
        Tensor(0, dtype=ninetoothed.float64),
        Tensor(0, dtype=ninetoothed.float64),
        Tensor(ndim, dtype=dtype),
    )
    return bound_arrangement, application_with_decimals, signature
0 commit comments