+#ifndef INFINI_OPS_ASCEND_GEMM_KERNEL_H_
+#define INFINI_OPS_ASCEND_GEMM_KERNEL_H_
+
+#include <cstdint>
+#include <optional>
+
+#include "acl/acl.h"
+#include "aclnn/aclnn_base.h"
+#include "aclnnop/aclnn_addmm.h"
+#include "aclnnop/aclnn_baddbmm.h"
+#include "ascend/common.h"
+#include "ascend/workspace_pool.h"
+#include "base/gemm.h"
+#include "operator.h"
+
+namespace infini::ops {
+
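+// Ascend GEMM: dispatches to aclnnAddmm for 2-D inputs and aclnnBaddbmm for
+// batched inputs, computing c = beta * c + alpha * op(a) @ op(b).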
+template <>
+class Operator<Gemm, Device::Type::kAscend> : public Gemm {
+ public:
+  Operator(const Tensor a, const Tensor b, std::optional<float> alpha,
+           std::optional<float> beta, std::optional<int> trans_a,
+           std::optional<int> trans_b, Tensor c)
+      : Gemm(a, b, alpha, beta, trans_a, trans_b, c),
+        batched_{batch_count_ > 1},
+        alpha_val_{alpha.value_or(1.0f)},
+        beta_val_{beta.value_or(1.0f)} {
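+    // alpha/beta are captured once at construction; operator() ignores its
+    // call-time values. The aclScalar handles live until the destructor runs.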
+    alpha_scalar_ = aclCreateScalar(&alpha_val_, ACL_FLOAT);
+    beta_scalar_ = aclCreateScalar(&beta_val_, ACL_FLOAT);
+  }
+
+  ~Operator() {
+    aclDestroyScalar(alpha_scalar_);
+    aclDestroyScalar(beta_scalar_);
+  }
+
+  void operator()(const Tensor a, const Tensor b,
+                  std::optional<float> /*alpha*/, std::optional<float> /*beta*/,
+                  std::optional<int> /*trans_a*/, std::optional<int> /*trans_b*/,
+                  Tensor c) const override {
+    auto stream = static_cast<aclrtStream>(stream_);
+
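+    // Wrap the tensors as aclTensor views; buildAclTensor presumably encodes
+    // the transpose flags into the view's strides rather than copying data.
+    // c is bound twice: as the accumulator input (self) and as the output.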
+    auto t_self = ascend::buildAclTensor(c);
+    auto t_a = ascend::buildAclTensor(a, trans_a_);
+    auto t_b = ascend::buildAclTensor(b, trans_b_);
+    auto t_out = ascend::buildAclTensor(c);
+
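+    // aclnn two-phase protocol: the GetWorkspaceSize call sizes the scratch
+    // buffer and materializes a one-shot executor for the launch below.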
+    uint64_t ws_needed = 0;
+    aclOpExecutor* executor = nullptr;
+
+    if (batched_) {
+      aclnnBaddbmmGetWorkspaceSize(t_self, t_a, t_b, beta_scalar_,
+                                   alpha_scalar_, t_out, 0, &ws_needed,
+                                   &executor);
+    } else {
+      aclnnAddmmGetWorkspaceSize(t_self, t_a, t_b, beta_scalar_, alpha_scalar_,
+                                 t_out, 0, &ws_needed, &executor);
+    }
+
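+    // Grab a stream-local scratch buffer of at least ws_needed bytes from the
+    // shared pool (semantics assumed from workspacePool().ensure's name).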
+    auto& arena = ascend::workspacePool().ensure(stream, ws_needed);
+
+    if (batched_) {
+      aclnnBaddbmm(arena.buf, ws_needed, executor, stream);
+    } else {
+      aclnnAddmm(arena.buf, ws_needed, executor, stream);
+    }
+
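+    // The executor is consumed by the launch above; only the aclTensor views
+    // need explicit teardown (this does not free the underlying device data).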
+    aclDestroyTensor(t_self);
+    aclDestroyTensor(t_a);
+    aclDestroyTensor(t_b);
+    aclDestroyTensor(t_out);
+  }
+
+ private:
+  bool batched_;
+  float alpha_val_;
+  float beta_val_;
+  aclScalar* alpha_scalar_ = nullptr;
+  aclScalar* beta_scalar_ = nullptr;
+};
+
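+// Hypothetical call site (tensor names are illustrative, not from this repo):
+//   Operator<Gemm, Device::Type::kAscend> gemm(a, b, 1.0f, 1.0f, 0, 0, c);
+//   gemm(a, b, std::nullopt, std::nullopt, std::nullopt, std::nullopt, c);
+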
+}  // namespace infini::ops
+
+#endif  // INFINI_OPS_ASCEND_GEMM_KERNEL_H_