@@ -70,7 +70,8 @@ void LaunchForward(const std::shared_ptr<Tensor> &output, const std::shared_ptr<
     for (int i = 0; i < dim; ++i) { outer_size *= input_dims[i]; };
     for (int i = dim + 1; i < input_dims.size(); ++i) { inner_size *= input_dims[i]; };
     if (axis_size == 0) {
-        LOG(ERROR) << "CUDA softmax forward: 'input_dims[dim] == 0' at " << __FILE__ << ":" << __LINE__;
+        LOG(INFO) << "CUDA softmax forward: 'input_dims[dim] == 0' at " << __FILE__ << ":" << __LINE__;
+        return;
     }
     if (outer_size == 0) {
         return;
@@ -83,7 +84,7 @@ void LaunchForward(const std::shared_ptr<Tensor> &output, const std::shared_ptr<
     cudaGetDeviceProperties(&prop, input->GetDevice().Index());
 
     if (BLOCK_SIZE > prop.maxThreadsPerBlock) {
-        LOG(ERROR) << "CUDA softmax forward: 'BLOCK_SIZE used is larger than the max number of thread per block' at "
+        LOG(FATAL) << "CUDA softmax forward: 'BLOCK_SIZE used is larger than the max number of thread per block' at "
             << __FILE__ << ":" << __LINE__;
     }
     dim3 block_dims(BLOCK_SIZE);
@@ -105,7 +106,7 @@ std::shared_ptr<Tensor> SoftmaxForward(const std::shared_ptr<Tensor> &input, int
         LaunchForward<256, float>(output, input, dim);
         break;
     default:
-        return nullptr;
+        LOG(FATAL) << "CUDA softmax forward: 'Unsupported data type' at " << __FILE__ << ":" << __LINE__;
     }
 
     return output;
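Taken together, the forward-side changes separate two kinds of bad input: an empty tensor (zero-length softmax axis, or a zero outer dimension) is now logged at INFO and skipped with an early return, while conditions that can never succeed (a BLOCK_SIZE above the device's per-block thread limit, an unsupported dtype) abort via LOG(FATAL) instead of logging ERROR or returning nullptr. Below is a minimal consolidated sketch of that guard logic, assuming a glog-style LOG macro and a plain dims vector; the `ForwardLaunchIsSafe` helper and its signature are illustrative, not the project's API:

```cpp
#include <cstdint>
#include <vector>
#include <cuda_runtime.h>
#include <glog/logging.h>

// Hypothetical helper: returns true only if a softmax kernel launch over
// input_dims along `dim` is both meaningful and possible on this device.
template <int BLOCK_SIZE>
bool ForwardLaunchIsSafe(const std::vector<int64_t> &input_dims, int dim,
                         int device_index) {
    int64_t outer_size = 1;
    int64_t inner_size = 1;
    const int64_t axis_size = input_dims[dim];
    for (int i = 0; i < dim; ++i) { outer_size *= input_dims[i]; }
    for (size_t i = dim + 1; i < input_dims.size(); ++i) { inner_size *= input_dims[i]; }

    // An empty tensor means there is nothing to compute: log at INFO and skip
    // the launch, as in the diff (which guards axis_size and outer_size).
    if (axis_size == 0 || outer_size == 0 || inner_size == 0) {
        LOG(INFO) << "CUDA softmax forward: empty tensor, skipping kernel launch";
        return false;
    }

    // A block larger than the device allows can never launch successfully,
    // so abort up front rather than letting the kernel fail later.
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, device_index);
    if (BLOCK_SIZE > prop.maxThreadsPerBlock) {
        LOG(FATAL) << "CUDA softmax forward: BLOCK_SIZE " << BLOCK_SIZE
                   << " > maxThreadsPerBlock " << prop.maxThreadsPerBlock;
    }
    return true;
}
```

The early return keeps empty tensors a no-op for callers, matching the existing `outer_size == 0` path, rather than treating them as errors.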
@@ -154,7 +155,8 @@ void LaunchBackward(const std::shared_ptr<Tensor> &grad_input, const std::shared
     for (int i = 0; i < dim; ++i) { outer_size *= output_dims[i]; };
     for (int i = dim + 1; i < output_dims.size(); ++i) { inner_size *= output_dims[i]; };
     if (axis_size == 0) {
-        LOG(ERROR) << "CUDA softmax backward: 'output_dims[dim] == 0' at " << __FILE__ << ":" << __LINE__;
+        LOG(INFO) << "CUDA softmax backward: 'output_dims[dim] == 0' at " << __FILE__ << ":" << __LINE__;
+        return;
     }
     if (outer_size == 0) {
         return;
@@ -168,7 +170,7 @@ void LaunchBackward(const std::shared_ptr<Tensor> &grad_input, const std::shared
     cudaGetDeviceProperties(&prop, output->GetDevice().Index());
 
     if (BLOCK_SIZE > prop.maxThreadsPerBlock) {
-        LOG(ERROR) << "CUDA softmax backward: 'BLOCK_SIZE used is larger than the max number of thread per block' at "
+        LOG(FATAL) << "CUDA softmax backward: 'BLOCK_SIZE used is larger than the max number of thread per block' at "
             << __FILE__ << ":" << __LINE__;
     }
     dim3 block(BLOCK_SIZE);
@@ -192,7 +194,7 @@ std::shared_ptr<Tensor> SoftmaxBackward(const std::shared_ptr<Tensor> &grad_outp
         LaunchBackward<256, float>(grad_input, grad_output, output, dim);
         break;
     default:
-        return nullptr;
+        LOG(FATAL) << "CUDA softmax backward: 'Unsupported data type' at " << __FILE__ << ":" << __LINE__;
     }
 
     return grad_input;
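The backward path mirrors the forward one, and both share the same switch-on-dtype dispatch. With glog, LOG(FATAL) aborts the process, so the default case no longer hands back a nullptr that every caller would have to check, and the trailing `return output;` / `return grad_input;` always returns a valid tensor. A hypothetical sketch of that dispatch shape, with a stand-in `DataType` enum and a generic launcher callback in place of the project's own types:

```cpp
#include <glog/logging.h>

// Stand-in dtype enum; the real project defines its own.
enum class DataType { kFLOAT32, kFLOAT64 };

// Dispatch to a dtype-specific launcher; unsupported dtypes abort outright.
template <typename LaunchFloat32>
void DispatchSoftmax(DataType dtype, LaunchFloat32 &&launch_float32) {
    switch (dtype) {
    case DataType::kFLOAT32:
        launch_float32();  // e.g. LaunchForward<256, float>(output, input, dim)
        break;
    default:
        // LOG(FATAL) never returns, so no placeholder `return nullptr;` is
        // needed and the caller never sees a null result.
        LOG(FATAL) << "CUDA softmax: 'Unsupported data type' at " << __FILE__
                   << ":" << __LINE__;
    }
}

// Usage sketch:
//   DispatchSoftmax(dtype, [&] { LaunchForward<256, float>(output, input, dim); });
```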