Skip to content

Commit cc97ea2

Browse files
Ziminlikilin
authored and committed
fix: change log level from ERROR to appropriate ones (INFO and FATAL) and change unsupported data type handling to log fatal in elementwise.cu and softmax.cu
1 parent ca5c657 commit cc97ea2

2 files changed

Lines changed: 12 additions & 10 deletions

File tree

infini_train/src/kernels/cuda/elementwise.cu

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -153,7 +153,7 @@ template <typename Func> std::shared_ptr<Tensor> UnaryForward(const std::shared_
153153
LaunchForward<256, float>(unary_fn, output, input);
154154
break;
155155
default:
156-
return nullptr;
156+
LOG(FATAL) << "CUDA unary forward: 'Unsupported data type' at " << __FILE__ << ":" << __LINE__;
157157
}
158158

159159
return output;
@@ -170,7 +170,7 @@ std::shared_ptr<Tensor> UnaryBackward(const std::shared_ptr<Tensor> &grad_output
170170
LaunchBackward<256, float>(unary_fn, output, grad_output, a);
171171
break;
172172
default:
173-
return nullptr;
173+
LOG(FATAL) << "CUDA unary backward: 'Unsupported data type' at " << __FILE__ << ":" << __LINE__;
174174
}
175175

176176
return output;
@@ -190,7 +190,7 @@ std::shared_ptr<Tensor> BinaryForward(const std::shared_ptr<Tensor> &a, const st
190190
LaunchForward<256, float>(binary_fn, output, a, b);
191191
break;
192192
default:
193-
return nullptr;
193+
LOG(FATAL) << "CUDA binary forward: 'Unsupported data type' at " << __FILE__ << ":" << __LINE__;
194194
}
195195

196196
return output;
@@ -225,7 +225,7 @@ BinaryBackward(const std::shared_ptr<Tensor> &grad_output, const std::shared_ptr
225225
LaunchBackward<256, float>(fn_a, fn_b, grad_a, grad_b, a_num_elements, b_num_elements, grad_output, a, b);
226226
break;
227227
default:
228-
return {nullptr, nullptr};
228+
LOG(FATAL) << "CUDA binary backward: 'Unsupported data type' at " << __FILE__ << ":" << __LINE__;
229229
}
230230

231231
return {grad_a, grad_b};

infini_train/src/kernels/cuda/softmax.cu

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,8 @@ void LaunchForward(const std::shared_ptr<Tensor> &output, const std::shared_ptr<
7070
for (int i = 0; i < dim; ++i) { outer_size *= input_dims[i]; };
7171
for (int i = dim + 1; i < input_dims.size(); ++i) { inner_size *= input_dims[i]; };
7272
if (axis_size == 0) {
73-
LOG(ERROR) << "CUDA softmax forward: 'input_dims[dim] == 0' at " << __FILE__ << ":" << __LINE__;
73+
LOG(INFO) << "CUDA softmax forward: 'input_dims[dim] == 0' at " << __FILE__ << ":" << __LINE__;
74+
return;
7475
}
7576
if (outer_size == 0) {
7677
return;
@@ -83,7 +84,7 @@ void LaunchForward(const std::shared_ptr<Tensor> &output, const std::shared_ptr<
8384
cudaGetDeviceProperties(&prop, input->GetDevice().Index());
8485

8586
if (BLOCK_SIZE > prop.maxThreadsPerBlock) {
86-
LOG(ERROR) << "CUDA softmax forward: 'BLOCK_SIZE used is larger than the max number of thread per block' at "
87+
LOG(FATAL) << "CUDA softmax forward: 'BLOCK_SIZE used is larger than the max number of thread per block' at "
8788
<< __FILE__ << ":" << __LINE__;
8889
}
8990
dim3 block_dims(BLOCK_SIZE);
@@ -105,7 +106,7 @@ std::shared_ptr<Tensor> SoftmaxForward(const std::shared_ptr<Tensor> &input, int
105106
LaunchForward<256, float>(output, input, dim);
106107
break;
107108
default:
108-
return nullptr;
109+
LOG(FATAL) << "CUDA softmax forward: 'Unsupported data type' at " << __FILE__ << ":" << __LINE__;
109110
}
110111

111112
return output;
@@ -154,7 +155,8 @@ void LaunchBackward(const std::shared_ptr<Tensor> &grad_input, const std::shared
154155
for (int i = 0; i < dim; ++i) { outer_size *= output_dims[i]; };
155156
for (int i = dim + 1; i < output_dims.size(); ++i) { inner_size *= output_dims[i]; };
156157
if (axis_size == 0) {
157-
LOG(ERROR) << "CUDA softmax backward: 'output_dims[dim] == 0' at " << __FILE__ << ":" << __LINE__;
158+
LOG(INFO) << "CUDA softmax backward: 'output_dims[dim] == 0' at " << __FILE__ << ":" << __LINE__;
159+
return;
158160
}
159161
if (outer_size == 0) {
160162
return;
@@ -168,7 +170,7 @@ void LaunchBackward(const std::shared_ptr<Tensor> &grad_input, const std::shared
168170
cudaGetDeviceProperties(&prop, output->GetDevice().Index());
169171

170172
if (BLOCK_SIZE > prop.maxThreadsPerBlock) {
171-
LOG(ERROR) << "CUDA softmax backward: 'BLOCK_SIZE used is larger than the max number of thread per block' at "
173+
LOG(FATAL) << "CUDA softmax backward: 'BLOCK_SIZE used is larger than the max number of thread per block' at "
172174
<< __FILE__ << ":" << __LINE__;
173175
}
174176
dim3 block(BLOCK_SIZE);
@@ -192,7 +194,7 @@ std::shared_ptr<Tensor> SoftmaxBackward(const std::shared_ptr<Tensor> &grad_outp
192194
LaunchBackward<256, float>(grad_input, grad_output, output, dim);
193195
break;
194196
default:
195-
return nullptr;
197+
LOG(FATAL) << "CUDA softmax backward: 'Unsupported data type' at " << __FILE__ << ":" << __LINE__;
196198
}
197199

198200
return grad_input;

0 commit comments

Comments
 (0)