|
| 1 | +import infinicore |
| 2 | +from infinicore.lib import _infinicore |
| 3 | +from infinicore.tensor import Tensor |
| 4 | + |
| 5 | + |
def var(
    input: Tensor,
    dim: int | tuple[int, ...] | list[int] | None = None,
    unbiased: bool | None = None,
    correction: int | None = None,
    keepdim: bool = False,
    *,
    dtype=None,
    out=None,
) -> Tensor:
    r"""Return the variance of ``input``, optionally reduced along ``dim``.

    Args:
        input: the input tensor.
        dim: dimension(s) to reduce over. ``None`` reduces over all elements.
            A single-element tuple/list is unwrapped to a plain int before
            being handed to the backend.
        unbiased: legacy flag; ``True`` is equivalent to ``correction=1`` and
            ``False`` to ``correction=0``. May be combined with ``correction``
            only when the two agree.
        correction: difference between the sample size and the sample degrees
            of freedom (Bessel's correction). Defaults to 1 when neither
            ``unbiased`` nor ``correction`` is given.
        keepdim: whether the output retains the reduced dimension(s).
        dtype: desired output dtype.
            NOTE(review): only forwarded on the ntops fast path; the
            ``_infinicore`` fallback ignores it — confirm whether that is
            intended or should raise when ``dtype`` is set.
        out: optional output tensor; when given, the result is written into it
            in place and ``out`` is returned.

    Returns:
        The variance tensor (``out`` when it was provided).

    Raises:
        ValueError: if ``unbiased`` and ``correction`` are both specified with
            conflicting values.
    """

    # Reconcile the legacy `unbiased` flag with the newer `correction` knob.
    if unbiased is not None:
        if correction is not None and correction != (1 if unbiased else 0):
            raise ValueError(
                "Cannot specify both 'unbiased' and 'correction' with conflicting values."
            )
        final_correction = 1 if unbiased else 0
    else:
        # Default matches torch.var: Bessel's correction (correction=1).
        final_correction = correction if correction is not None else 1

    # Fast path: delegate to ntops on devices that provide it.
    if infinicore.use_ntops and input.device.type in ("cuda", "musa"):
        return infinicore.ntops.torch.var(
            input,
            dim=dim,
            correction=final_correction,
            keepdim=keepdim,
            dtype=dtype,
            out=out,
        )

    if dim is None:
        # Global reduction over all elements.
        if out is None:
            return Tensor(_infinicore.var_global(input._underlying, final_correction))
        _infinicore.var_global_(input._underlying, out._underlying, final_correction)
        return out

    # Unwrap a single-element tuple/list so the backend receives a plain int.
    target_dim = dim
    if isinstance(target_dim, (tuple, list)) and len(target_dim) == 1:
        target_dim = target_dim[0]

    if out is None:
        return Tensor(
            _infinicore.var_reduce(
                input._underlying, target_dim, final_correction, keepdim
            )
        )

    _infinicore.var_reduce_(
        input._underlying, out._underlying, target_dim, final_correction, keepdim
    )
    return out
0 commit comments