Commit 7ac8e731 by xuchen

Merge branch 'xuchen' into xiaotong-working

parents 7ae1562d 7e9d7015
......@@ -127,7 +127,6 @@ struct FNNNet
};
/* entry of the program */
extern "C"
int FNNLMMain(int argc, const char ** argv);
};
......
......@@ -47,9 +47,9 @@ extern const char * GetDataTypeName(TENSOR_DATA_TYPE type);
extern TENSOR_DATA_TYPE GetDataType(const char * typeName);
/* data conversion (for lower precision computation) */
extern "C" unsigned short FloatToFloat16(float f);
extern "C" float Float16ToFloat(unsigned short h);
extern "C" void ConvertDataType(int devID,
unsigned short FloatToFloat16(float f);
float Float16ToFloat(unsigned short h);
void ConvertDataType(int devID,
void * s, TENSOR_DATA_TYPE typeS,
void * t, TENSOR_DATA_TYPE typeT, int size);
......
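
A note on the conversion helpers above: the following is a minimal, self-contained sketch (not the NiuTrans.Tensor implementation) of what FloatToFloat16/Float16ToFloat are assumed to do, i.e. repack an IEEE-754 single into a 16-bit half. Rounding is truncation and subnormals are flushed to zero to keep it short.

// Simplified float <-> IEEE-754 half conversion, for illustration only.
#include <cstdint>
#include <cstdio>
#include <cstring>

static unsigned short FloatToFloat16Ref(float f) {
    uint32_t x;  std::memcpy(&x, &f, sizeof(x));
    uint32_t sign = (x >> 16) & 0x8000u;
    int32_t  exp  = (int32_t)((x >> 23) & 0xFF) - 127 + 15;   // re-bias the exponent
    uint32_t mant = (x >> 13) & 0x3FFu;                       // keep the top 10 mantissa bits
    if (exp <= 0)  return (unsigned short)sign;               // underflow -> signed zero
    if (exp >= 31) return (unsigned short)(sign | 0x7C00u);   // overflow  -> infinity
    return (unsigned short)(sign | ((uint32_t)exp << 10) | mant);
}

static float Float16ToFloatRef(unsigned short h) {
    uint32_t sign = (uint32_t)(h & 0x8000u) << 16;
    int32_t  exp  = (h >> 10) & 0x1F;
    uint32_t mant = h & 0x3FFu;
    uint32_t x;
    if (exp == 0)       x = sign;                              // zero/subnormal -> zero
    else if (exp == 31) x = sign | 0x7F800000u | (mant << 13); // inf/NaN
    else                x = sign | ((uint32_t)(exp - 15 + 127) << 23) | (mant << 13);
    float f;  std::memcpy(&f, &x, sizeof(f));
    return f;
}

int main() {
    float v = 3.1415926f;
    unsigned short h = FloatToFloat16Ref(v);
    std::printf("%f -> 0x%04x -> %f\n", v, (unsigned)h, Float16ToFloatRef(h));
    return 0;
}
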
......@@ -486,9 +486,8 @@ quick sorting
NOTE: this means that the items may not be placed in a continuous memory space
>> comp - the comparison function
*/
void XQSort(void * dataA, void * dataB, void * index, int num, int width, int stride, int (*comp)(const void *, const void *))
void XQSort(void * data, void * index, int num, int width, int stride, int (*comp)(const void *, const void *))
{
XMemCopy(dataB, -1, dataA, -1, num * width);
char *lo, *hi; // ends of sub-array currently sorting
int *indexlo, *indexhi;
char *mid; // points to middle of subarray
......@@ -507,8 +506,8 @@ void XQSort(void * dataA, void * dataB, void * index, int num, int width, int st
stackptr = 0;
lo = (char*)dataB;
hi = (char*)dataB + realStride * (num - 1);
lo = (char*)data;
hi = (char*)data + realStride * (num - 1);
indexlo = (int*)index;
indexhi = index != NULL ? (int*)index + stride * (num - 1) : NULL;
......
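
For reference, the new XQSort sorts the data array in place (the old version first copied dataA into dataB), carries an optional index array along, and allows items that sit "stride" elements apart. The sketch below only illustrates that contract and is not the library routine; it uses an insertion sort for brevity, and the names StridedSortRef/CompFloatRef are made up here.

// In-place strided sort with an accompanying index array, for illustration only.
#include <cstdio>
#include <cstring>

static int CompFloatRef(const void * a, const void * b) {
    float fa = *(const float*)a, fb = *(const float*)b;
    return (fa > fb) - (fa < fb);
}

static void StridedSortRef(void * data, int * index, int num, int width, int stride,
                           int (*comp)(const void *, const void *)) {
    char * base = (char*)data;
    int realStride = width * stride;               // byte distance between two items
    for (int i = 1; i < num; i++) {
        for (int j = i; j > 0; j--) {
            char * cur = base + j * realStride;
            char * pre = base + (j - 1) * realStride;
            if (comp(pre, cur) <= 0)
                break;
            char tmp[16];                          // enough for the 4-byte items used here
            std::memcpy(tmp, cur, width);
            std::memcpy(cur, pre, width);
            std::memcpy(pre, tmp, width);
            if (index != NULL) {                   // keep the index array aligned with the data
                int t = index[j * stride];
                index[j * stride] = index[(j - 1) * stride];
                index[(j - 1) * stride] = t;
            }
        }
    }
}

int main() {
    float data[]  = {3.0f, 1.0f, 2.0f};
    int   index[] = {0, 1, 2};
    StridedSortRef(data, index, 3, sizeof(float), 1, CompFloatRef);
    for (int i = 0; i < 3; i++)
        std::printf("%.1f(%d) ", data[i], index[i]);
    std::printf("\n");
    return 0;
}
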
......@@ -53,7 +53,7 @@ extern void XSleep(int sleepTime);
extern double GetClock();
extern double GetClockSec();
extern void XQSort(void * dataA, void * dataB, void * index, int num, int width, int stride, int (*comp)(const void *, const void *));
extern void XQSort(void * data, void * index, int num, int width, int stride, int (*comp)(const void *, const void *));
extern int CompXFloat(const void * a, const void * b);
#ifdef USE_CUDA
......
......@@ -60,7 +60,6 @@ set each entry to its absolute value
>> a - input tensor
>> b - output tensor
*/
extern "C"
void _CudaAbsolute(const XTensor * a, XTensor * b)
{
CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
......
......@@ -34,7 +34,6 @@ __global__
void KernelAbsolute(__half * a, __half * b, int size);
/* set each entry to its absolute value */
extern "C"
void _CudaAbsolute(const XTensor * a, XTensor * b);
#endif // USE_CUDA
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* matrix multiplication in batch mode (CPU code) */
extern "C"
void _MatrixMULBatchedCPU(const XList * a, MATRIX_TRANS_TYPE transposedA, const XList * b, MATRIX_TRANS_TYPE transposedB,
XList * c, DTYPE alpha = (DTYPE)1.0, DTYPE beta = 0);
......
......@@ -46,10 +46,10 @@ c = a * b * \alpha
>> cRowSize - row size of matrix c
>> alpha - the scaling factor
*/
extern "C" __global__
__global__
void KernelMatrixMulDenseMSparseMV2(DTYPE * a, MATRIX_TRANS_TYPE transposedA, int aColSize, int aRowSize,
void * b, MATRIX_TRANS_TYPE transposedB, int bNonZeroNum, int bColSize, int bRowSize,
DTYPE * c, int cColSize, int cRowSize, DTYPE alpha)
void * b, MATRIX_TRANS_TYPE transposedB, int bNonZeroNum, int bColSize, int bRowSize,
DTYPE * c, int cColSize, int cRowSize, DTYPE alpha)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
......
......@@ -32,17 +32,16 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
multiplication of a dense matrix with a sparse vector
c = a * b * \alpha
*/
extern "C" __global__
__global__
void KernelMatrixMulDenseMSparseMV2(DTYPE * a, MATRIX_TRANS_TYPE transposedA, int aColSize, int aRowSize,
void * b, MATRIX_TRANS_TYPE transposedB, int bNonZeroNum, int bColSize, int bRowSize,
DTYPE * c, int cColSize, int cRowSize, DTYPE alpha);
void * b, MATRIX_TRANS_TYPE transposedB, int bNonZeroNum, int bColSize, int bRowSize,
DTYPE * c, int cColSize, int cRowSize, DTYPE alpha);
/*
matrix multiplication (for 2d tensors) (cuda version)
c = trans(a) * trans(b) * alpha + c * beta
where trans() returns the transposed matrix if the flag is fired
*/
extern "C"
void _CudaMatrixMul2D(const XTensor * a, MATRIX_TRANS_TYPE transposedA, const XTensor * b, MATRIX_TRANS_TYPE transposedB, XTensor * c,
DTYPE alpha = (DTYPE)1.0, DTYPE beta = 0, XStream * stream = NULL);
......
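
_CudaMatrixMul2D above computes c = trans(a) * trans(b) * alpha + c * beta. As a plain-C++ reference of that GEMM semantics (non-transposed case only, and not the cuBLAS-backed path), a sketch:

// Reference GEMM: c = a * b * alpha + c * beta, row-major, no transposition.
#include <cstdio>

static void MatMulRef(const float * a, const float * b, float * c,
                      int n, int k, int m, float alpha, float beta) {
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < m; j++) {
            float sum = 0.0f;
            for (int p = 0; p < k; p++)
                sum += a[i * k + p] * b[p * m + j];    // (a * b)(i, j)
            c[i * m + j] = sum * alpha + c[i * m + j] * beta;
        }
    }
}

int main() {
    float a[4] = {1, 2, 3, 4};          // 2 x 2
    float b[4] = {5, 6, 7, 8};          // 2 x 2
    float c[4] = {0, 0, 0, 0};          // 2 x 2
    MatMulRef(a, b, c, 2, 2, 2, 1.0f, 0.0f);
    std::printf("%g %g / %g %g\n", c[0], c[1], c[2], c[3]);   // 19 22 / 43 50
    return 0;
}
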
......@@ -30,7 +30,6 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
matrix multiplication for a block (x1,y1) - (x2,y2)
where (x1,y1) is the upper-left corner and (x2,y2) is the bottom-right corner
*/
extern "C"
void _MatrixMul2DMultiTheading(XList * args);
} // namespace nts(NiuTrans.Tensor)
......
......@@ -31,7 +31,6 @@ matrix multiplication (for 2d tensors) with multi-threading.
c = trans(a) * trans(b) * alpha + c * beta
where trans() returns the transposed matrix if the flag is fired.
*/
extern "C"
void _MatrixMul2DParallel(const XTensor * a, MATRIX_TRANS_TYPE transposedA, const XTensor * b, MATRIX_TRANS_TYPE transposedB,
XTensor * c, DTYPE alpha = (DTYPE)1.0, DTYPE beta = 0, XPRunner * parallelRunner = NULL);
......
......@@ -113,10 +113,10 @@ void _MatrixMulBatched(const XTensor * a, MATRIX_TRANS_TYPE transposedA,
cublasHandle_t * handle = a->mem != NULL ? a->mem->GetCublasHandle() : GDevs.GetCudaHandle(a->devID);
_CudaBLASMatrixMULList(handle,
aList, transposedA,
bList, transposedB,
cList, aList->count,
alpha, beta);
aList, transposedA,
bList, transposedB,
cList, aList->count,
alpha, beta);
BacktoCudaDev(a->devID, devIDBackup);
#else
......
......@@ -34,7 +34,7 @@ multiplication of data arrays in an element-wise manner c(i) = a(i)*b(i)
>> c - result data array
>> size - size of c
*/
extern "C" __global__
__global__
void KernelMulElementWise(DTYPE * a, DTYPE * b, DTYPE * c, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
......@@ -51,7 +51,7 @@ multiplication of data arrays in an element-wise manner c(i) = a(i)*b(i) + \alpha
>> size - size of c
>> alpha - the coefficient
*/
extern "C" __global__
__global__
void KernelMulElementWiseV2(DTYPE * a, DTYPE * b, DTYPE * c, int size, DTYPE alpha)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
......@@ -120,7 +120,6 @@ where i is the item index
>> alpha - the coefficient
>> leadingDim - dimension along which we perform broadcasting
*/
extern "C"
void _CudaMultiply(const XTensor * a, const XTensor * b, XTensor * c, DTYPE alpha, int leadingDim)
{
int leadingDimRDI = a->order - leadingDim - 1;
......
......@@ -29,11 +29,11 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* multiplication of two tensors in an element-wise manner c(i) = a(i)*b(i) */
extern "C" __global__
__global__
void KernelMulElementWise(DTYPE * a, DTYPE * b, DTYPE * c, int size);
/* multiplication of two tensors in an element-wise manner c(i) = a(i)*b(i) + \alpha*c(i) */
extern "C" __global__
__global__
void KernelMulElementWiseV2(DTYPE * a, DTYPE * b, DTYPE * c, int size, DTYPE alpha);
/* multiplication of two tensors in an element-wise manner c(i) = a(i)*b(i) + \alpha*c(i) */
......@@ -41,7 +41,6 @@ template<int nonZeroAlpha>__global__
void KernelMulElementWiseTensorDynamic(DTYPE * a, DTYPE * b, DTYPE * c, DTYPE alpha, int stride, int ldSizeA, int ldSizeB, int ldSizeC, int blockNum);
/* element-wise product of two tensors */
extern "C"
void _CudaMultiply(const XTensor * a, const XTensor * b, XTensor * c, DTYPE alpha = 0, int leadingDim = 0);
#endif // USE_CUDA
......
......@@ -68,7 +68,6 @@ set each entry to its negative value
>> a - input tensor
>> b - output tensor
*/
extern "C"
void _CudaNegate(const XTensor * a, XTensor * b)
{
CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
......
......@@ -37,7 +37,6 @@ __global__
void KernelNegate(__half * a, __half * b, int size);
/* set each entry to its negative value */
extern "C"
void _CudaNegate(const XTensor * a, XTensor * b);
#endif // USE_CUDA
......
......@@ -66,7 +66,6 @@ set each entry to its sign value
>> a - input tensor we are processing
>> b - output tensor we are processing
*/
extern "C"
void _CudaSign(const XTensor * a, XTensor * b)
{
CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
......
......@@ -37,7 +37,6 @@ __global__
void KernelSign(__half * a, __half * b, int size);
/* set each entry to its sign value */
extern "C"
void _CudaSign(const XTensor * a, XTensor * b);
#endif // USE_CUDA
......
......@@ -35,7 +35,7 @@ c = a + b * \beta
>> size - the size of a/b/c
>> beta - the coefficient
*/
extern "C" __global__
__global__
void KernelADD(DTYPE * a, DTYPE * b, DTYPE * c, int size, DTYPE beta)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
......
......@@ -29,15 +29,13 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* summation of data arrays (CUDA Kernel) */
extern "C" __global__
__global__
void KernelADD(DTYPE * a, DTYPE * b, DTYPE * c, int size, DTYPE beta = (DTYPE)1.0);
/* tensor summation c = a + b * \beta (cuda version) */
extern "C"
void _CudaSum(const XTensor * a, const XTensor * b, XTensor * c = NULL, DTYPE beta = (DTYPE)1.0);
/* tensor summation c = a + b * \beta (cuda version) with an input handle */
extern "C"
void _CudaSumWithHandle(int devID, cublasHandle_t * handle, DTYPE * a, DTYPE * b, DTYPE * c, int size, DTYPE beta = (DTYPE)1.0);
#endif // USE_CUDA
......
......@@ -42,7 +42,7 @@ void _SumByColumnTV(const XTensor * a, const XTensor * b, XTensor * c, DTYPE bet
CheckNTErrors((a && b && c), "Empty input tensors!");
CheckNTErrors((XTensor::IsSameShaped(a, c)), "Unmatched tensors in addition!");
CheckNTErrors((b->order == 2 && b->dimSizeRDI[0] == 1 && b->dimSizeRDI[1] == a->dimSizeRDI[1]),
"Illegal input vector size!");
"Illegal input vector size!");
int rowNum = a->dimSize[0];
int colNum = a->dimSize[1];
......
......@@ -39,7 +39,7 @@ c_col = a_col + b * \beta
>> size - size of the entire data array
>> beta - the scaling factor
*/
extern "C" __global__
__global__
void KernelADDByColumnTV(DTYPE * a, DTYPE * b, DTYPE * c, int colNum, int blockSize, int size, DTYPE beta)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
......@@ -69,9 +69,9 @@ void _CudaSumByColumnTV(const XTensor * a, const XTensor * b, XTensor * c, DTYPE
CheckNTErrors((a && b && c), "Empty input tensors!");
CheckNTErrors((XTensor::IsSameShaped(a, c)), "Unmatched tensors in addition!");
CheckNTErrors((b->order == 2 && b->dimSizeRDI[0] == 1 && b->dimSizeRDI[1] == a->dimSizeRDI[1]),
"Illegal input vector size!");
"Illegal input vector size!");
CheckNTErrors((a->dataType == DEFAULT_DTYPE && b->dataType == DEFAULT_DTYPE &&
c->dataType == DEFAULT_DTYPE), "TODO");
c->dataType == DEFAULT_DTYPE), "TODO");
int rowNum = a->dimSize[0];
int colNum = a->dimSize[1];
......
......@@ -29,7 +29,6 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* summation of a tensor and a vector (column vector) */
extern "C"
void _CudaSumByColumnTV(const XTensor * a, const XTensor * b, XTensor * c, DTYPE beta = (DTYPE)1.0);
#endif // USE_CUDA
......
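
_CudaSumByColumnTV adds a column vector to every column of a matrix, c_col = a_col + b * beta. A standalone reference of that semantics (an assumption based on the comments above, not the library code):

// c(i, j) = a(i, j) + b(i) * beta, b holding one entry per row of a.
#include <cstdio>

static void SumByColumnTVRef(const float * a, const float * b, float * c,
                             int rowNum, int colNum, float beta) {
    for (int i = 0; i < rowNum; i++)
        for (int j = 0; j < colNum; j++)
            c[i * colNum + j] = a[i * colNum + j] + b[i] * beta;
}

int main() {
    float a[6] = {1, 2, 3, 4, 5, 6};    // 2 x 3
    float b[2] = {10, 20};              // column vector
    float c[6];
    SumByColumnTVRef(a, b, c, 2, 3, 1.0f);
    std::printf("%g %g %g / %g %g %g\n", c[0], c[1], c[2], c[3], c[4], c[5]);
    return 0;
}
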
......@@ -42,7 +42,7 @@ void _SumByColumnVT(const XTensor * a, const XTensor * b, XTensor * c, DTYPE bet
CheckNTErrors((a && b && c), "Empty input tensors!");
CheckNTErrors((XTensor::IsSameShaped(a, c)), "Unmatched tensors in addition!");
CheckNTErrors((a->order == 2 && a->dimSizeRDI[0] == 1 && b->dimSizeRDI[1] == a->dimSizeRDI[1]),
"Illegal input vector size!");
"Illegal input vector size!");
if (a->devID >= 0 || b->devID >= 0 || c->devID >= 0) {
#ifdef USE_CUDA
......
......@@ -39,7 +39,7 @@ c = a + \sum{col} b_col * \beta
>> size - size of the entire data array
>> beta - the scaling factor
*/
extern "C" __global__
__global__
void KernelADDByColumnVT(DTYPE * a, DTYPE * b, DTYPE * c, int colNum, int rowNum, int blockNum, DTYPE beta)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
......@@ -85,9 +85,9 @@ void _CudaSumByColumnVT(const XTensor * a, const XTensor * b, XTensor * c, DTYPE
CheckNTErrors((a && b && c), "Empty input tensors!");
CheckNTErrors((XTensor::IsSameShaped(a, c)), "Unmatched tensors in addition!");
CheckNTErrors((a->order == 2 && a->dimSizeRDI[0] == 1 && b->dimSizeRDI[1] == a->dimSizeRDI[1]),
"Illegal input vector size!");
"Illegal input vector size!");
CheckNTErrors((a->dataType == DEFAULT_DTYPE && b->dataType == DEFAULT_DTYPE &&
c->dataType == DEFAULT_DTYPE), "TODO");
c->dataType == DEFAULT_DTYPE), "TODO");
int rowNum = b->dimSize[0];
int colNum = b->dimSize[1];
......
......@@ -29,7 +29,6 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* summation of a vector (column vector) and a tensor */
extern "C"
void _CudaSumByColumnVT(const XTensor * a, const XTensor * b, XTensor * c, DTYPE beta = (DTYPE)1.0);
#endif // USE_CUDA
......
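
_CudaSumByColumnVT is the mirror case: a is the column vector and the columns of b are summed into it, c = a + \sum{col} b_col * beta. Again a reference sketch of the assumed semantics, not the CUDA code:

// c(i) = a(i) + beta * \sum_j b(i, j).
#include <cstdio>

static void SumByColumnVTRef(const float * a, const float * b, float * c,
                             int rowNum, int colNum, float beta) {
    for (int i = 0; i < rowNum; i++) {
        float sum = 0.0f;
        for (int j = 0; j < colNum; j++)
            sum += b[i * colNum + j];
        c[i] = a[i] + sum * beta;
    }
}

int main() {
    float a[2] = {1, 2};
    float b[6] = {1, 1, 1, 2, 2, 2};    // 2 x 3
    float c[2];
    SumByColumnVTRef(a, b, c, 2, 3, 1.0f);
    std::printf("%g %g\n", c[0], c[1]);  // 4 8
    return 0;
}
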
......@@ -42,7 +42,7 @@ void _MatrixMULCPU(const XTensor * a, MATRIX_TRANS_TYPE transposedA,
{
CheckNTErrors((a && b && c), "Empty input tensors!");
CheckNTErrors((a->order == 2 && b->order == 2 && c->order == 2),
"Input tensors must have a order = 2!");
"Input tensors must have a order = 2!");
CheckNTErrors((a->dataType == DEFAULT_DTYPE), "TODO!");
CheckNTErrors((b->dataType == DEFAULT_DTYPE), "TODO!");
CheckNTErrors((c->dataType == DEFAULT_DTYPE), "TODO!");
......
......@@ -143,7 +143,6 @@ void _CudaBLASMatrixMULBatched(cublasHandle_t * handle,
}
/* matrix multiplication in batch and strided mode via cuda version BLAS */
extern "C"
void _CudaBLASMatrixMULBatchedStrided(cublasHandle_t * handle,
const void * a, MATRIX_TRANS_TYPE transposedA, TENSOR_DATA_TYPE dataTypeA, long long int strideA,
const void * b, MATRIX_TRANS_TYPE transposedB, TENSOR_DATA_TYPE dataTypeB, long long int strideB,
......
......@@ -27,14 +27,12 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* matrix multiplication (BLAS) */
extern "C"
void _MatrixMULCPU(const XTensor * a, MATRIX_TRANS_TYPE transposedA, const XTensor * b, MATRIX_TRANS_TYPE transposedB,
XTensor * c, DTYPE alpha = (DTYPE)1.0, DTYPE beta = 0);
#ifdef USE_CUDA
/* matrix multiplication via cuda version BLAS */
extern "C"
void _CudaBLASMatrixMUL(cublasHandle_t * handle,
const void * a, MATRIX_TRANS_TYPE transposedA, TENSOR_DATA_TYPE dataTypeA,
const void * b, MATRIX_TRANS_TYPE transposedB, TENSOR_DATA_TYPE dataTypeB,
......@@ -42,7 +40,6 @@ void _CudaBLASMatrixMUL(cublasHandle_t * handle,
int na, int ma, int nb, int mb, int nc, int mc, DTYPE alpha = (DTYPE)1.0, DTYPE beta = 1.0);
/* matrix multiplication in batch mode via cuda version BLAS */
extern "C"
void _CudaBLASMatrixMULBatched(cublasHandle_t * handle,
const void ** a, MATRIX_TRANS_TYPE transposedA, TENSOR_DATA_TYPE dataTypeA,
const void ** b, MATRIX_TRANS_TYPE transposedB, TENSOR_DATA_TYPE dataTypeB,
......@@ -51,7 +48,6 @@ void _CudaBLASMatrixMULBatched(cublasHandle_t * handle,
DTYPE alpha = (DTYPE)1.0, DTYPE beta = 1.0);
/* matrix multiplication in batch and strided mode via cuda version BLAS */
extern "C"
void _CudaBLASMatrixMULBatchedStrided(cublasHandle_t * handle,
const void * a, MATRIX_TRANS_TYPE transposedA, TENSOR_DATA_TYPE dataTypeA, long long int strideA,
const void * b, MATRIX_TRANS_TYPE transposedB, TENSOR_DATA_TYPE dataTypeB, long long int strideB,
......@@ -60,7 +56,6 @@ void _CudaBLASMatrixMULBatchedStrided(cublasHandle_t * handle,
DTYPE alpha = (DTYPE)1.0, DTYPE beta = 1.0);
/* matrix multiplication in batch mode via cuda version BLAS */
extern "C"
void _CudaBLASMatrixMULList(cublasHandle_t * handle, const XList * a, MATRIX_TRANS_TYPE transposedA,
const XList * b, MATRIX_TRANS_TYPE transposedB, XList * c,
int count, DTYPE alpha = (DTYPE)1.0, DTYPE beta = 1.0);
......
......@@ -27,14 +27,12 @@
namespace nts{ // namespace nts(NiuTrans.Tensor)
/* generate a tensor with selected data c = select(a) */
extern "C"
void _CudaSelect(const XTensor * a, XTensor * c, XTensor * indexCPU);
/*
generate a tensor with selected data in range[low,high] along the given dimension
c = select(a)
*/
extern "C"
void _CudaSelectRange(const XTensor * a, XTensor * c, int dim, int low, int high);
} // namespace nts(NiuTrans.Tensor)
......
......@@ -60,7 +60,6 @@ set each entry to its log value
>> a - input tensor
>> b - output tensor
*/
extern "C"
void _CudaLog(const XTensor * a, XTensor * b)
{
CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
......
......@@ -37,7 +37,6 @@ __global__
void KernelLog(__half * a, __half * b, int size);
/* set each entry to its log value */
extern "C"
void _CudaLog(const XTensor * a, XTensor * b);
#endif // USE_CUDA
......
......@@ -44,8 +44,8 @@ where a and b are the scalar and bias respectively, and \epsilon is the adjustme
*/
__global__
void KernelNormalize(DTYPE * input, DTYPE * output, DTYPE * mean, DTYPE * var,
DTYPE * a, DTYPE * b, DTYPE epsilon,
int stride, int strideNum, int blockNum)
DTYPE * a, DTYPE * b, DTYPE epsilon,
int stride, int strideNum, int blockNum)
{
__shared__ DTYPE iMean[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ DTYPE iVar[MAX_CUDA_THREAD_NUM_PER_BLOCK];
......@@ -88,11 +88,10 @@ where a and b are the scalar and bias respectively, and \epsilon is the adjustme
>> b - the bias
>> epsilon - a parameter
*/
extern "C"
void _CudaNormalize(const XTensor * input, XTensor * output, int dim,
const XTensor * mean, const XTensor * var,
const XTensor * a, const XTensor * b,
DTYPE epsilon)
const XTensor * mean, const XTensor * var,
const XTensor * a, const XTensor * b,
DTYPE epsilon)
{
CheckNTErrors((input->dataType == DEFAULT_DTYPE), "TODO!");
......
......@@ -35,18 +35,17 @@ where a and b are the scalar and bias respectively, and \epsilon is the adjustme
*/
__global__
void KernelNormalize(DTYPE * input, DTYPE * output, DTYPE * mean, DTYPE * var,
DTYPE * a, DTYPE * b, DTYPE epsilon,
int stride, int strideNum, int blockNum);
DTYPE * a, DTYPE * b, DTYPE epsilon,
int stride, int strideNum, int blockNum);
/*
normalize the data with a normal distribution. For an input x,
y = a * (x-mean)/sqrt(variance+\epsilon) + b
where a and b are the scalar and bias respectively, and \epsilon is the adjustment parameter
*/
extern "C"
void _CudaNormalize(const XTensor * input, XTensor * output, int dim,
const XTensor * mean, const XTensor * var,
const XTensor * a, const XTensor * b, DTYPE epsilon);
const XTensor * mean, const XTensor * var,
const XTensor * a, const XTensor * b, DTYPE epsilon);
#endif // USE_CUDA
......
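
The normalization kernels above implement y = a * (x - mean) / sqrt(variance + epsilon) + b. A scalar reference of that formula (one vector with precomputed mean and variance), purely for illustration:

// Apply the normalization formula element by element.
#include <cmath>
#include <cstdio>

static void NormalizeRef(const float * x, float * y, int n,
                         float mean, float var, float a, float b, float epsilon) {
    for (int i = 0; i < n; i++)
        y[i] = a * (x[i] - mean) / std::sqrt(var + epsilon) + b;
}

int main() {
    float x[4] = {1, 2, 3, 4};
    float y[4];
    NormalizeRef(x, y, 4, 2.5f, 1.25f, 1.0f, 0.0f, 1e-6f);
    for (int i = 0; i < 4; i++)
        std::printf("%g ", y[i]);
    std::printf("\n");
    return 0;
}
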
......@@ -100,7 +100,6 @@ void KernelPower(__half * a, __half * b, __half p, int size)
}
/* get the power of the entries */
extern "C"
void _CudaPower(const XTensor * a, XTensor * b, DTYPE p)
{
CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!");
......
......@@ -37,7 +37,6 @@ __global__
void KernelSqrtV2(__half * a, __half * b, int size);
/* get the power of the entries */
extern "C"
void _CudaPower(const XTensor * a, XTensor * b, DTYPE p);
#endif // USE_CUDA
......
......@@ -47,8 +47,7 @@ void _ScaleAndShift(const XTensor * a, XTensor * b, DTYPE scale, DTYPE shift)
}
#endif
CheckNTErrors((a->dataType == DEFAULT_DTYPE),
"The tensor is not in the default data type!");
CheckNTErrors((a->dataType == DEFAULT_DTYPE), "The tensor is not in the default data type!");
/* sparse tensor */
if(a->isSparse){
......
......@@ -37,7 +37,6 @@ __global__
void KernelScaleAndShift(__half * a, __half * b, int size, __half scale, __half shift);
/* scale and shift all tensor entries b = a * scale + shift (cuda version) */
extern "C"
void _CudaScaleAndShift(const XTensor * a, XTensor * b, DTYPE scale, DTYPE shift);
#endif // USE_CUDA
......
......@@ -86,7 +86,7 @@ void _CopyBlocks(void * source, int blockSize, int * sourceBlocks, int blockNum,
*/
for (int i = 0; i < blockNum; i++) {
XMemCopy((char*)target + targetBlocks[i] * blockSize, devID,
(char*)source + sourceBlocks[i] * blockSize, devID, blockSize);
(char*)source + sourceBlocks[i] * blockSize, devID, blockSize);
}
}
}
......
......@@ -39,7 +39,7 @@ Note that a grid may have a number of blocks
>> isIndexOnDev - indicates whether the index is on the device already
*/
void _CopyBlocksInGrid(void * source, int blockSize, int blockNum, int gridNum, void * target,
int * index, int unitSize, bool isIndexOnDev, XMem * myMem)
int * index, int unitSize, bool isIndexOnDev, XMem * myMem)
{
CheckNTErrors((unitSize == sizeof(int)), "TODO!");
......
......@@ -29,7 +29,6 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* copy data by index */
extern "C"
void _CudaCopyBlocksInGrid(void * source, int blockSize, int blockNum, int gridNum, void * target, int * index, int unitSize, XMem * myMem);
#endif // USE_CUDA
......
......@@ -33,7 +33,6 @@ __global__
void KernelCopyBlocks(DTYPE * source, int blockSize, int blockNum, DTYPE * target, int * targetBlocks);
/* copy a number of blocks to target positions (cuda version) */
extern "C"
void _CudaCopyBlocks(void * source, int blockSize, int blockNum, void * target, int * targetBlocks, XMem * myMem);
#endif // USE_CUDA
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* copy a number of blocks to target positions (on site) */
extern "C"
void _CopyBlocksOnSite(void * source, int blockSize, int blockNum, void * target, int * targetBlocks, XMem * myMem);
} // namespace nts(NiuTrans.Tensor)
......
......@@ -72,7 +72,7 @@ copy a number of blocks from source positions to target positions (cuda version)
*/
void _CudaCopyBlocksSelected(void * source, int blockSize, int * sourceBlocks, int blockNum, void * target, int * targetBlocks, XMem * myMem, int devID)
{
CheckNTErrors((devID >= 0), "Wrong device to run!");
CheckNTErrors(devID >= 0, "Wrong device to run!");
CheckNTErrors((blockSize % sizeof(DTYPE) == 0), "Unsupported block size!");
/* copy the index to the GPU memory */
......
......@@ -33,7 +33,6 @@ __global__
void KernelCopyBlocksSelected(DTYPE * source, int blockSize, int * sourceBlocks, int blockNum, DTYPE * target, int * targetBlocks);
/* copy a number of blocks from source positions to target positions (cuda version) */
extern "C"
void _CudaCopyBlocksSelected(void * source, int blockSize, int * sourceBlocks, int blockNum, void * target, int * targetBlocks, XMem * myMem, int devID);
#endif // USE_CUDA
......
......@@ -44,7 +44,7 @@ void _CopyIndexed(const XTensor * s, XTensor * t, int dim, int * srcIndex, int i
{
CheckNTErrors((s && t), "Invalid tensors!");
CheckNTErrors((s->devID == t->devID || (s->devID < 0 && t->devID < 0)),
"the data must be kept on the same device!");
"the data must be kept on the same device!");
CheckNTErrors((dim < s->order && dim < t->order), "A too larget dimension specified!");
CheckNTErrors((s->unitSize == t->unitSize), "Unmatched tensors!");
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* copy selected sub-tensors */
extern "C"
void _CopyIndexed(const XTensor * s, XTensor * t, int dim, int * srcIndex, int indexSize, int * tgtIndex, int copyNum);
/*
......
......@@ -29,7 +29,6 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* copy all elements from a source matrix to a target matrix */
extern "C"
void _CudaCopyValues(const XTensor * s, XTensor * t, XStream * stream = NULL);
#endif // USE_CUDA
......
......@@ -101,8 +101,8 @@ crossing of the i-th column and the j-th row.
*/
__global__
void KernelReduceMax(__half * input, __half * output,
int stride, int strideNum, int reducedStrideNum,
int blockSize, int blockNum)
int stride, int strideNum, int reducedStrideNum,
int blockSize, int blockNum)
{
int idx = threadIdx.x * blockDim.y + threadIdx.y;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
......@@ -224,8 +224,8 @@ reduce a tensor to another that keeps the max value along a dimension - fast ve
*/
template <unsigned int goodSize> __global__
void KernelReduceMaxFast(__half * input, __half * output,
int stride, int strideNum, int reducedStrideNum,
int blockSize, int blockNum)
int stride, int strideNum, int reducedStrideNum,
int blockSize, int blockNum)
{
unsigned int tid = threadIdx.y;
unsigned int j = blockIdx.y * (blockDim.y * 2) + threadIdx.y;
......
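
KernelReduceMax and its fast variant reduce over the strideNum dimension of a (blockNum x strideNum x stride) view of the input. Below is a CPU reference of that reduction under the assumed layout; the shared-memory details of the kernels are deliberately omitted.

// Reduce-max over the middle ("strideNum") dimension.
#include <cfloat>
#include <cstdio>

static void ReduceMaxRef(const float * input, float * output,
                         int stride, int strideNum, int blockNum) {
    for (int k = 0; k < blockNum; k++) {
        for (int i = 0; i < stride; i++) {
            float m = -FLT_MAX;
            for (int j = 0; j < strideNum; j++) {
                float v = input[(k * strideNum + j) * stride + i];
                if (v > m)
                    m = v;
            }
            output[k * stride + i] = m;
        }
    }
}

int main() {
    float input[6] = {1, 4, 2, 5, 3, 6};   // blockNum = 1, strideNum = 3, stride = 2
    float output[2];
    ReduceMaxRef(input, output, 2, 3, 1);
    std::printf("%g %g\n", output[0], output[1]);   // 3 6
    return 0;
}
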
......@@ -29,7 +29,6 @@ namespace nts{ // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* get the max-valued items along a dimension of the tensor (cuda version) */
extern "C"
void _CudaReduceMax(const XTensor * input, XTensor * output, int dim);
#endif // USE_CUDA
......
......@@ -31,7 +31,6 @@ standard variance of the items along a dimension of the tensor
For a 1-dimensional data array a,
variance = (1/n * \sum_i (a_i - mean)^2)^0.5
*/
extern "C"
void _ReduceStandardVariance(XTensor * input, XTensor * output, int dim, XTensor * mean);
} // namespace nts(NiuTrans.Tensor)
......
......@@ -43,7 +43,7 @@ sum = \sum_i exp((a_i - shift)^power) if isExp == true
void _ReduceSum(const XTensor * input, XTensor * output, int dim, const XTensor * shift, DTYPE power, bool isExp)
{
CheckNTErrors((input->devID == output->devID || (input->devID < 0 && output->devID < 0)),
"This code must be run on the same device!");
"This code must be run on the same device!");
CheckNTErrors((input && output), "Empty input or output tensors!");
CheckNTErrors((input->order == output->order + 1), "Incorrect tensor sizes!");
CheckNTErrors((input->order > dim && dim >=0), "Illegal dimension to reduce!");
......@@ -53,12 +53,10 @@ void _ReduceSum(const XTensor * input, XTensor * output, int dim, const XTensor
int dimRDI = input->order - dim - 1;
for(int i = 0; i < input->order; i++){
if(i < dimRDI){
CheckNTErrors((input->dimSizeRDI[i] == output->dimSizeRDI[i]),
"Unmatched tensors!");
CheckNTErrors((input->dimSizeRDI[i] == output->dimSizeRDI[i]), "Unmatched tensors!");
}
else if(i > dimRDI){
CheckNTErrors((input->dimSizeRDI[i] == output->dimSizeRDI[i - 1]),
"Unmatched tensors!");
CheckNTErrors((input->dimSizeRDI[i] == output->dimSizeRDI[i - 1]), "Unmatched tensors!");
}
}
......
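
_ReduceSum computes sum = \sum_i (a_i - shift)^power, or the exp of each term when isExp is true (the header below writes a_i + shift; the sign of the shift here follows the .cpp comment above). A one-dimensional reference sketch:

// Reduce a 1-d array to a single sum, with optional shift, power, and exp.
#include <cmath>
#include <cstdio>

static float ReduceSumRef(const float * a, int n, float shift, float power, bool isExp) {
    float sum = 0.0f;
    for (int i = 0; i < n; i++) {
        float v = std::pow(a[i] - shift, power);
        sum += isExp ? std::exp(v) : v;
    }
    return sum;
}

int main() {
    float a[4] = {1, 2, 3, 4};
    std::printf("plain: %g  squared: %g  exp: %g\n",
                ReduceSumRef(a, 4, 0.0f, 1.0f, false),     // 10
                ReduceSumRef(a, 4, 0.0f, 2.0f, false),     // 30
                ReduceSumRef(a, 4, 0.0f, 1.0f, true));     // e^1 + ... + e^4
    return 0;
}
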
......@@ -34,7 +34,6 @@ For a 1-dimensional data array a,
sum = \sum_i ((a_i + shift)^power) if isExp == false
sum = \sum_i exp((a_i + shift)^power) if isExp == true
*/
extern "C"
void _CudaReduceSum(const XTensor * input, XTensor * output, int dim, const XTensor * shift, DTYPE power, bool isExp);
#endif // USE_CUDA
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* concatenate a list of tensors along a given dimension */
extern "C"
void _ConcatenateSolely(const XList * smalls, XTensor * big, int dim);
} // namespace nts(NiuTrans.Tensor)
......
......@@ -69,7 +69,6 @@ set target data block index for the data movement in split
>> gridNum - number of grids
>> mem - the memory pool
*/
extern "C"
void _CudaMakeMergeBlockIndex(int devID,
int * blockIndex, int blockNum, int blockNumInMerge,
int splitSizeInGrid, int gridSize, int gridNum)
......
......@@ -29,7 +29,6 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* set target data block index for the data movement in split */
extern "C"
void _CudaMakeMergeBlockIndex(int devID, int * blockIndex, int blockNum, int blockNumInMerge,
int splitSizeInGrid, int gridSize, int gridNum);
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* set target data block index for the data movement in merge */
extern "C"
void _MakeMergeBlockIndex(int * blockIndex, int blockNum, int blockNumInMerge,
int splitSizeInGrid, int gridSize, int gridNum, XMem * mem);
......
......@@ -57,7 +57,6 @@ set target data block index for the data movement in split
>> blockSplitSize - size of the splitted block
>> blockNum - number of data blocks
*/
extern "C"
void _CudaMakeSplitBlockIndex(int devID, int * blockIndex, int splitNum, int blockSplitSize, int blockNum)
{
int cudaGrids[3];
......
......@@ -29,7 +29,6 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* set target data block index for the data movement in split */
extern "C"
void _CudaMakeSplitBlockIndex(int devID, int * blockIndex, int splitNum, int blockSplitSize, int blockNum);
#endif // USE_CUDA
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* set target data block index for the data movement in split */
extern "C"
void _MakeSplitBlockIndex(int * blockIndex, int splitNum, int blockSplitSize, int blockNum, XMem * mem);
} // namespace nts(NiuTrans.Tensor)
......
......@@ -99,8 +99,7 @@ void _Merge(const XTensor * s, XTensor * t, int whereToMerge, int leadingDim)
char * sData = (char*)s->data + g * blockSize * blockNum * s->unitSize;
for (int k = 0; k < mergedNum; k++) {
XMemCopy2D(tData + k * tStep, tPtich, t->devID,
sData + k * sStep, sPitch, s->devID,
mSize, n);
sData + k * sStep, sPitch, s->devID, mSize, n);
}
}
}
......
......@@ -71,7 +71,6 @@ merge data by blocks (cuda version)
>> target - target data array
>> myMem - the memory pool
*/
extern "C"
void _CudaMergeBlockLists(const XList * sourceList, int * blockSizes, int blockNum, void * target, XMem * myMem)
{
CheckNTErrors((myMem != NULL), "No memory pool!");
......
......@@ -33,7 +33,6 @@ __global__
void KernelCopyBlockLists(DTYPE ** sourceList, int * sourceBlockSizes, int sourceBlockNum, DTYPE ** targetList);
/* merge data by blocks (cuda version) */
extern "C"
void _CudaMergeBlockLists(const XList * sourceList, int * blockSizes, int blockNum, void * target, XMem * myMem);
#endif // USE_CUDA
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* merge data by blocks */
extern "C"
void _MergeBlockLists(const XList * sourceList, int * blockSizes, int blockNum, void * target, XMem * myMem);
} // namespace nts(NiuTrans.Tensor)
......
......@@ -66,7 +66,6 @@ insert a dimension by copying the blocks for x times (where x is the size of the
>> dim - where to insert the dimension
>> dSize - size of the newly-inserted dimension
*/
extern "C"
void _CudaUnsqueeze(const XTensor * a, XTensor * b, int dim, int dSize)
{
int blockSize = 1;
......
......@@ -29,7 +29,6 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* duplicate the data along a given dimension */
extern "C"
void _CudaUnsqueeze(const XTensor * a, XTensor * b, int dim, int dSize);
#endif // USE_CUDA
......
......@@ -20,6 +20,7 @@
*/
#include "../../XTensor.h"
#include "../movement/CopyValues.h"
#include "../../XUtility.h"
#include "../../XName.h"
#include "Sort.h"
......@@ -63,15 +64,15 @@ void _Sort(const XTensor * a, XTensor * b, XTensor * index, int dim)
blockNum *= a->dimSizeRDI[i];
int blockSize = stride * strideNum;
_CopyValues(a, b);
for (int k = 0; k < blockNum; k++) {
for (int i = 0; i < stride; i++) {
void * dataA = (char*)a->data + (k * blockSize + i) * a->unitSize;
void * dataB = (char*)b->data + (k * blockSize + i) * b->unitSize;
void * indexData = (char*)index->data + (k * blockSize + i) * sizeof(int);
/* we sort the data array along "dim" */
if (a->dataType == X_FLOAT)
XQSort(dataA, dataB, indexData, strideNum, a->unitSize, stride, CompXFloat);
XQSort(dataB, indexData, strideNum, a->unitSize, stride, CompXFloat);
else {
ShowNTErrors("TODO!");
}
......
......@@ -29,7 +29,6 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* sort the tensor along a given dimension */
extern "C"
void _CudaSortBig(const XTensor * a, XTensor * b, XTensor * indexA, XTensor * indexB, int dim, int k = -1);
#endif // USE_CUDA
......
......@@ -39,7 +39,6 @@ void _SortMe(XTensor * a, XTensor * index, int dim);
sort the data along a given dimension (return a XTensor structure)
make a new tensor to keep the result and return it
*/
extern "C"
void Sort(XTensor & a, XTensor & b, XTensor & index, int dim);
} // namespace nts(NiuTrans.Tensor)
......
......@@ -29,7 +29,6 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* get the top-k items along a given dimension */
extern "C"
void _CudaTopK(const XTensor * a, XTensor * b, XTensor * index, int dim, int k);
#endif // USE_CUDA
......
......@@ -63,7 +63,6 @@ set the cell to the ascending order along a given dimension
>> a - the tensor
>> dim - the dimension
*/
extern "C"
void CudaSetAscendingOrder(XTensor * a, int dim)
{
CheckNTErrors((a->dataType == X_INT), "TODO!");
......
......@@ -29,7 +29,6 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* set the cell to the ascending order along a given dimension */
extern "C"
void CudaSetAscendingOrder(XTensor * a, int dim);
#endif // USE_CUDA
......
......@@ -28,15 +28,12 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
/* segmentation and parallel processing for 2d tensors (i.e., matrices) */
/* segment a 2d tensor (i.e., matrix) into blocks and run jobs in parallel */
extern "C"
void RunParallel2D(XPRunner * parallelRunner, void * job, int opNum, int rowNum, int colNum, int argNum, ...);
/* segment a block into sub-blocks */
extern "C"
int SegmentTensor2D(int rowNum, int colNum, int blockNum, int * blockIndex);
/* segment a block into sub-blocks */
extern "C"
int SegmentTensor2DInRows(int rowNum, int colNum, int blockNum, int * blockIndex);
} // namespace nts(NiuTrans.Tensor)
......
......@@ -35,11 +35,9 @@ y = 1 if x > 1
x if -1 <= x <= 1
-1 if x < -1
*/
extern "C"
void _CudaHardTanH(const XTensor * input, XTensor * output);
/* de/dx (Cuda version) */
extern "C"
void _CudaHardTanHBackward(XTensor * gold, XTensor * y, XTensor * x,
XTensor * dedy, XTensor * dedx,
LOSS_FUNCTION_NAME lossName);
......
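
_CudaHardTanH applies y = 1 if x > 1, y = x if -1 <= x <= 1, y = -1 if x < -1. A scalar reference of that clipping function:

// Hard tanh: clip the input to [-1, 1].
#include <cstdio>

static float HardTanHRef(float x) {
    if (x > 1.0f)  return 1.0f;
    if (x < -1.0f) return -1.0f;
    return x;
}

int main() {
    float xs[5] = {-2.0f, -0.5f, 0.0f, 0.5f, 2.0f};
    for (int i = 0; i < 5; i++)
        std::printf("hardtanh(%.1f) = %.1f\n", xs[i], HardTanHRef(xs[i]));
    return 0;
}
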
......@@ -190,7 +190,7 @@ set dE/dx = exp(y)
>> size - size of output
>> lossName - name of the loss function
*/
extern "C" __global__
__global__
void KernelExpLoss(DTYPE * dedy, DTYPE * dedx, DTYPE * y, int size, LOSS_FUNCTION_NAME lossName)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
......
......@@ -223,7 +223,7 @@ backward computation for squared error (Cuda kernel)
>> y - model output (in vector)
>> size - size of the vector (dedy)
*/
extern "C" __global__
__global__
void KernelLossBackwardSquaredError(DTYPE * dedy, DTYPE * t, DTYPE * y, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
......@@ -243,7 +243,7 @@ backward computation of blocks for squared error (Cuda kernel)
>> lenInBlock - number of items in a block for computation
>> size - size of the vector (dedy)
*/
extern "C" __global__
__global__
void KernelLossBackwardSquaredErrorBlock(DTYPE * dedy, DTYPE * t, DTYPE * y,
int blockSize, int begInBlock, int lenInBlock, int size)
{
......@@ -266,7 +266,7 @@ backward computation for cross entropy (Cuda kernel)
>> y - model output (in vector)
>> size - size of the vector (dedy)
*/
extern "C" __global__
__global__
void KernelLossBackwardCrossEntropy(DTYPE * dedy, DTYPE * t, DTYPE * y, int tBeg, int tLen, int yBeg, int blockNum, int stride, int dimensionSize)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
......@@ -298,7 +298,7 @@ backward computation for cross entropy (Cuda kernel)
>> lenInBlock - number of items in a block for computation
>> size - size of the vector (dedy)
*/
extern "C" __global__
__global__
void KernelLossBackwardCrossEntropyBlock(DTYPE * dedy, DTYPE * t, DTYPE * y,
int blockSize, int begInBlock, int lenInBlock, int size)
{
......
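
The backward kernels above fill dE/dy element by element. Under the usual conventions (an assumption here; the exact scaling the kernels use is not shown in this hunk), squared error E = 1/2 (t - y)^2 gives dE/dy = y - t and cross entropy E = -\sum_i t_i log y_i gives dE/dy = -t / y:

// Element-wise loss gradients under the assumed conventions.
#include <cstdio>

static void LossBackwardSquaredErrorRef(float * dedy, const float * t, const float * y, int size) {
    for (int i = 0; i < size; i++)
        dedy[i] = y[i] - t[i];
}

static void LossBackwardCrossEntropyRef(float * dedy, const float * t, const float * y, int size) {
    for (int i = 0; i < size; i++)
        dedy[i] = -t[i] / y[i];
}

int main() {
    float t[3] = {0.1f, 0.8f, 0.1f};
    float y[3] = {0.2f, 0.5f, 0.3f};
    float dedySE[3], dedyCE[3];
    LossBackwardSquaredErrorRef(dedySE, t, y, 3);
    LossBackwardCrossEntropyRef(dedyCE, t, y, 3);
    std::printf("squared error: %g %g %g\n", dedySE[0], dedySE[1], dedySE[2]);
    std::printf("cross entropy: %g %g %g\n", dedyCE[0], dedyCE[1], dedyCE[2]);
    return 0;
}
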
......@@ -30,21 +30,17 @@ namespace nts{ // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* compute the loss (cuda version) */
extern "C"
DTYPE _CudaLossCompute(XTensor * gold, XTensor * output, LOSS_FUNCTION_NAME LFName,
bool isLogOutput, int leadDim, int gBeg, int gLen, int oBeg);
/* compute the loss in log scale (cuda version) */
extern "C"
DTYPE _CudaLossComputeForLogScale(XTensor * gold, XTensor * output, LOSS_FUNCTION_NAME LFName,
int leadDim, int gBeg, int gLen, int oBeg);
/* backward computation for a single element (cuda version) */
extern "C"
DTYPE _CudaLossBackwardPoint(DTYPE t, DTYPE y, LOSS_FUNCTION_NAME LFName);
/* backward computation for (dense) vectors (cuda version) */
extern "C"
void _CudaLossBackward(XTensor * dedy, XTensor * t, XTensor * y,
LOSS_FUNCTION_NAME LFName,
int leadDim = -1, int tBeg = 0, int tLen = -1, int yBeg = 0);
......
......@@ -30,11 +30,9 @@ namespace nts{ // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* rectify function y = max(0, x) (Cuda version) */
extern "C"
void _CudaRectify(const XTensor * input, XTensor * output);
/* de/dx (Cuda version) */
extern "C"
void _CudaRectifyBackward(XTensor * gold, XTensor * y, XTensor * x,
XTensor * dedy, XTensor * dedx,
LOSS_FUNCTION_NAME lossName);
......
......@@ -30,11 +30,9 @@ namespace nts{ // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* sigmoid function y = 1 / (1 + exp(-x)) (Cuda version) */
extern "C"
void _CudaSigmoid(const XTensor * input, XTensor * output);
/* de/dx (Cuda version) */
extern "C"
void _CudaSigmoidBackward(XTensor * gold, XTensor * y, XTensor * x,
XTensor * dedy, XTensor * dedx,
LOSS_FUNCTION_NAME lossName);
......
......@@ -30,15 +30,12 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* softmax y = e^x / \sum_{i} e^{x_i} (Cuda version) */
extern "C"
void _CudaSoftmax(const XTensor * input, XTensor * output, int leadDim);
/* softmax y = e^x / \sum_{i} e^{x_i} (Cuda version) */
extern "C"
void _CudaSoftmaxSumMax(const XTensor * x, XTensor * y, int leadDim, XTensor * sum, XTensor * max);
/* de/dx (Cuda version) */
extern "C"
void _CudaSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
XTensor * dedy, XTensor * dedx,
int leadDim,
......
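
_CudaSoftmax computes y_i = e^{x_i} / \sum_j e^{x_j}; the SumMax variant presumably supplies the per-row sum and max (an assumption), which matches the standard max-subtraction trick for numerical stability. A reference sketch over one vector:

// Numerically stable softmax: subtract the max before exponentiating.
#include <cmath>
#include <cstdio>

static void SoftmaxRef(const float * x, float * y, int n) {
    float max = x[0];
    for (int i = 1; i < n; i++)
        if (x[i] > max)
            max = x[i];
    float sum = 0.0f;
    for (int i = 0; i < n; i++) {
        y[i] = std::exp(x[i] - max);
        sum += y[i];
    }
    for (int i = 0; i < n; i++)
        y[i] /= sum;
}

int main() {
    float x[3] = {1.0f, 2.0f, 3.0f};
    float y[3];
    SoftmaxRef(x, y, 3);
    std::printf("%g %g %g\n", y[0], y[1], y[2]);
    return 0;
}
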
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for Absolute Function */
extern "C"
bool TestAbsolute();
} // namespace nts(NiuTrans.Tensor)
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for Concatenate Function */
extern "C"
bool TestConcatenate();
} // namespace nts(NiuTrans.Tensor)
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for ConcatenateSolely Function */
extern "C"
bool TestConcatenateSolely();
} // namespace nts(NiuTrans.Tensor)
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for ConvertDataType Function */
extern "C"
bool TestConvertDataType();
} // namespace nts(NiuTrans.Tensor)
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for CopyIndexed Function */
extern "C"
bool TestCopyIndexed();
} // namespace nts(NiuTrans.Tensor)
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for CopyValues Function */
extern "C"
bool TestCopyValues();
} // namespace nts(NiuTrans.Tensor)
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for HardTanH Function */
extern "C"
bool TestHardTanH();
} // namespace nts(NiuTrans.Tensor)
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for Identity Function */
extern "C"
bool TestIdentity();
} // namespace nts(NiuTrans.Tensor)
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for Log Function */
extern "C"
bool TestLog();
} // namespace nts(NiuTrans.Tensor)
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for LogSoftmax Function */
extern "C"
bool TestLogSoftmax();
} // namespace nts(NiuTrans.Tensor)
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for Loss Function */
extern "C"
bool TestLoss();
} // namespace nts(NiuTrans.Tensor)
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for MatrixMul Function */
extern "C"
bool TestMatrixMul();
} // namespace nts(NiuTrans.Tensor)
......
......@@ -248,7 +248,6 @@ bool TestMatrixMul2D2()
*/
/* test for MatrixMul2D Function */
extern "C"
bool TestMatrixMul2D()
{
XPRINT(0, stdout, "[TEST MATRIXMUL2D] matrix multiplication (for 2d tensors) \n");
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for MatrixMul2D Function */
extern "C"
bool TestMatrixMul2D();
} // namespace nts(NiuTrans.Tensor)
......
......@@ -27,7 +27,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for MatrixMul2DParallel Function */
extern "C"
bool TestMatrixMul2DParallel();
} // namespace nts(NiuTrans.Tensor)
......
......@@ -210,14 +210,14 @@ bool TestSort()
XPRINT(0, stdout, "[TEST SORT] sort the tensor along a given dimension \n");
bool returnFlag = true, caseFlag = true;
///* case 1 test */
//caseFlag = TestSort1();
//if (!caseFlag) {
// returnFlag = false;
// XPRINT(0, stdout, ">> case 1 failed!\n");
//}
//else
// XPRINT(0, stdout, ">> case 1 passed!\n");
/* case 1 test */
caseFlag = TestSort1();
if (!caseFlag) {
returnFlag = false;
XPRINT(0, stdout, ">> case 1 failed!\n");
}
else
XPRINT(0, stdout, ">> case 1 passed!\n");
/* case 2 test */
caseFlag = TestSort2();
......
......@@ -69,7 +69,6 @@
namespace nts { // namespace nts(NiuTrans.Tensor)
/* test for all Function */
extern "C"
bool Test();
} // namespace nts(NiuTrans.Tensor)
......