Skip to content

Commit 7aa7703

Browse files
committed
Finish T1-1-18: group_norm, reshape, bitwise_xor, isclose, rrelu
1 parent 3c8fb3c commit 7aa7703

32 files changed

Lines changed: 1094 additions & 15 deletions
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
#pragma once

#include "../tensor.hpp"
#include "common/op.hpp"

namespace infinicore::op {

// Element-wise bitwise XOR operator.
class BitwiseXor {
public:
    // Dispatch signature: (a, b, out) — the result is written into `out`.
    using schema = void (*)(Tensor, Tensor, Tensor);
    static void execute(Tensor a, Tensor b, Tensor out);
    static common::OpDispatcher<schema> &dispatcher();
};

// Allocating variant: returns a freshly created tensor holding a ^ b.
Tensor bitwise_xor(Tensor a, Tensor b);
// Out variant: writes a ^ b into `out` and returns the same reference.
Tensor &bitwise_xor_out(Tensor a, Tensor b, Tensor &out);

} // namespace infinicore::op
Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
#pragma once

#include "../device.hpp"
#include "../tensor.hpp" // Tensor is used throughout this header; include what you use
#include "common/op.hpp"

#include <cstdint> // int64_t
#include <optional>

namespace infinicore::op {

// Group Normalization operator (normalizes over `num_groups` channel groups).
class GroupNorm {
public:
    // Dispatch signature: (input, num_groups, weight, bias, eps, output).
    // `weight`/`bias` are optional affine parameters; result goes into `output`.
    using schema = void (*)(Tensor, int64_t, std::optional<Tensor>, std::optional<Tensor>, double, Tensor);
    static void execute(Tensor input, int64_t num_groups, std::optional<Tensor> weight, std::optional<Tensor> bias, double eps, Tensor output);
    static common::OpDispatcher<schema> &dispatcher();
};

// Allocating variant: returns a new tensor with the normalized result.
Tensor group_norm(Tensor input, int64_t num_groups, std::optional<Tensor> weight, std::optional<Tensor> bias, double eps);
// Out variant: writes the normalized result into `output`.
void group_norm_(Tensor input, int64_t num_groups, std::optional<Tensor> weight, std::optional<Tensor> bias, double eps, Tensor output);

} // namespace infinicore::op

include/infinicore/ops/isclose.hpp

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
#pragma once

#include "../tensor.hpp"
#include "common/op.hpp"

namespace infinicore::op {

// Element-wise approximate-equality operator.
class IsClose {
public:
    // Dispatch signature: (a, b, rtol, atol, equal_nan, out).
    using schema = void (*)(Tensor, Tensor, double, double, bool, Tensor);
    static void execute(Tensor a, Tensor b, double rtol, double atol, bool equal_nan, Tensor out);
    static common::OpDispatcher<schema> &dispatcher();
};

// Returns a boolean tensor marking where `a` and `b` are close within the
// given relative/absolute tolerances; `equal_nan` makes NaN compare equal to NaN.
Tensor isclose(Tensor a, Tensor b, double rtol = 1e-05, double atol = 1e-08, bool equal_nan = false);

} // namespace infinicore::op

include/infinicore/ops/reshape.hpp

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
#pragma once

#include "../tensor.hpp"
#include "common/op.hpp"

namespace infinicore::op {

// Reshape is primarily a metadata operation, so unlike compute ops it does
// not define a schema/dispatcher pair.
Tensor reshape(Tensor input, Shape shape);

} // namespace infinicore::op

include/infinicore/ops/rrelu.hpp

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
#pragma once

#include "../tensor.hpp"
#include "common/op.hpp"

namespace infinicore::op {

// Randomized leaky ReLU operator.
class RReLU {
public:
    // Dispatch signature: (input, lower, upper, training, output).
    using schema = void (*)(Tensor, double, double, bool, Tensor);
    static void execute(Tensor input, double lower, double upper, bool training, Tensor output);
    static common::OpDispatcher<schema> &dispatcher();
};

// Allocating (or, when `inplace` is true, self-modifying) variant.
Tensor rrelu(Tensor input, double lower = 0.125, double upper = 0.3333333333333333, bool training = false, bool inplace = false);
// In-place variant.
// NOTE(review): this returns Tensor& while taking `input` by value — verify the
// implementation does not return a reference to the by-value parameter (dangling).
Tensor &rrelu_(Tensor input, double lower = 0.125, double upper = 0.3333333333333333, bool training = false);

} // namespace infinicore::op

python/infinicore/__init__.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,9 @@
5454
from infinicore.ops.rearrange import rearrange
5555
from infinicore.ops.squeeze import squeeze
5656
from infinicore.ops.unsqueeze import unsqueeze
57+
from infinicore.ops.reshape import reshape
58+
from infinicore.ops.bitwise_xor import bitwise_xor
59+
from infinicore.ops.isclose import isclose
5760
from infinicore.tensor import (
5861
Tensor,
5962
empty,
@@ -134,6 +137,9 @@
134137
"strided_empty",
135138
"strided_from_blob",
136139
"zeros",
140+
"reshape",
141+
"bitwise_xor",
142+
"isclose",
137143
]
138144

139145
use_ntops = False

python/infinicore/nn/functional/__init__.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,8 @@
66
from .rope import RopeAlgo, rope
77
from .silu import silu
88
from .swiglu import swiglu
9+
from .group_norm import group_norm
10+
from .rrelu import rrelu
911

1012
__all__ = [
1113
"causal_softmax",
@@ -17,4 +19,6 @@
1719
"embedding",
1820
"rope",
1921
"RopeAlgo",
22+
"group_norm",
23+
"rrelu",
2024
]
Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
import infinicore
2+
from infinicore.lib import _infinicore
3+
from infinicore.tensor import Tensor
4+
5+
6+
def group_norm(
    input: Tensor,
    num_groups: int,
    weight: Tensor | None = None,
    bias: Tensor | None = None,
    eps: float = 1e-5,
) -> Tensor:
    r"""Applies Group Normalization."""

    # Fast path: delegate to ntops when available on supported devices.
    if infinicore.use_ntops and input.device.type in ("cuda", "musa"):
        return infinicore.ntops.torch.group_norm(
            input, num_groups, weight=weight, bias=bias, eps=eps
        )

    # Unwrap optional affine parameters for the C++ backend.
    raw_weight = None if weight is None else weight._underlying
    raw_bias = None if bias is None else bias._underlying

    # Call the C++ backend via pybind and re-wrap the result.
    result = _infinicore.group_norm(
        input._underlying, num_groups, raw_weight, raw_bias, eps
    )
    return Tensor(result)
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
import infinicore
2+
from infinicore.lib import _infinicore
3+
from infinicore.tensor import Tensor
4+
5+
def rrelu(
    input: Tensor,
    lower: float = 0.125,
    upper: float = 0.3333333333333333,
    training: bool = False,
    inplace: bool = False,
    generator=None,
) -> Tensor:
    r"""Applies the randomized leaky rectified linear unit function."""

    # Fast path: delegate to ntops when available on supported devices.
    if infinicore.use_ntops and input.device.type in ("cuda", "musa"):
        return infinicore.ntops.torch.rrelu(
            input, lower, upper, training, inplace, generator
        )

    # NOTE(review): the native backend takes no generator argument, so a
    # caller-supplied `generator` is silently ignored here — confirm intended.
    return Tensor(
        _infinicore.rrelu(input._underlying, lower, upper, training, inplace)
    )
Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
import infinicore
2+
from infinicore.lib import _infinicore
3+
from infinicore.tensor import Tensor
4+
5+
def bitwise_xor(input: Tensor, other: Tensor, out: Tensor | None = None) -> Tensor:
    """Computes the bitwise XOR of input and other."""

    # Fast path: delegate to ntops when available on supported devices.
    if infinicore.use_ntops and input.device.type in ("cuda", "musa"):
        return infinicore.ntops.torch.bitwise_xor(input, other, out=out)

    # Allocating form when no destination tensor is supplied.
    if out is None:
        return Tensor(_infinicore.bitwise_xor(input._underlying, other._underlying))

    # Out form: write into the caller's tensor and hand it back.
    _infinicore.bitwise_xor_out(input._underlying, other._underlying, out._underlying)
    return out

0 commit comments

Comments
 (0)