Commit b7c25dee by liyinqiao

Reorganize codes

1. Move the SetAscendingOrder functions from XTensor.*  to core/utilities/SetAscendingOrder.*
2. Rename XElement.* files to XCall.*
parent 79c659b5
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2019-10-21
*/
#ifndef __XCALL_H__
#define __XCALL_H__
#include "XTensor.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/*
* we define the "new and delete" functions below.
* NOTE(review): the "V2" variants drop the XMem memory-pool argument in
* favor of an isEnableGrad switch and always create dense tensors.
*/
/* initialize a XTensor (in place) with the given order, shape and data type */
void InitTensor(XTensor * tensor,
const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const float myDenseRatio = 1.0F, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense XTensor V2 (no memory pool; gradient switchable) */
void InitTensorV2(XTensor * tensor,
const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense vector (1d tensor) */
void InitTensor1D(XTensor * tensor, const int num,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense vector (1d tensor) V2 */
void InitTensor1DV2(XTensor * tensor, const int num,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense matrix (2d tensor) */
void InitTensor2D(XTensor * tensor, const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense matrix (2d tensor) V2 */
void InitTensor2DV2(XTensor * tensor, const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense 3d tensor */
void InitTensor3D(XTensor * tensor, const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense 3d tensor V2 */
void InitTensor3DV2(XTensor * tensor, const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense 4d tensor */
void InitTensor4D(XTensor * tensor, const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense 4d tensor V2 */
void InitTensor4DV2(XTensor * tensor, const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense 5d tensor */
void InitTensor5D(XTensor * tensor, const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense 5d tensor V2 */
void InitTensor5DV2(XTensor * tensor, const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a tensor with the same order/shape/type as a reference tensor */
void InitTensor(XTensor * tensor, const XTensor * reference);
/* initialize a tensor with a reference tensor V2 (no memory pool) */
void InitTensorV2(XTensor * tensor, const XTensor * reference);
/* initialize a tensor on the CPU with a reference tensor (shape/type copied, device forced to CPU) */
void InitTensorOnCPU(XTensor * tensor, const XTensor * reference);
/* generate an empty XTensor with no initialization */
XTensor * NewTensor();
/* generate a XTensor with the given order, shape and data type */
XTensor * NewTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const float myDenseRatio = 1.0F, const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense XTensor V2 (no memory pool; gradient switchable) */
XTensor * NewTensorV2(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a XTensor which allocates its data on the memory-pool buffer */
XTensor * NewTensorBuf(const int myOrder, const int * myDimSize,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const float myDenseRatio = 1.0F,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense XTensor which allocates its data on the buffer V2 */
XTensor * NewTensorBufV2(const int myOrder, const int * myDimSize,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* generate a XTensor which allocates data on the buffer, shaped like a reference tensor */
XTensor * NewTensorBuf(const XTensor * reference, int devID, XMem * myMem);
/* generate a XTensor which allocates data on the buffer, shaped like a reference tensor V2 */
XTensor * NewTensorBufV2(const XTensor * reference, int devID, const bool isEnableGrad = true);
/* generate a dense vector (1d tensor) */
XTensor * NewTensor1D(const int num, const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1,
XMem * myMem = NULL);
/* generate a dense vector (1d tensor) V2 */
XTensor * NewTensor1DV2(const int num, const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense matrix (2d tensor) */
XTensor * NewTensor2D(const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense matrix (2d tensor) V2 */
XTensor * NewTensor2DV2(const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense 3d tensor */
XTensor * NewTensor3D(const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense 3d tensor V2 */
XTensor * NewTensor3DV2(const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense 4d tensor */
XTensor * NewTensor4D(const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense 4d tensor V2 */
XTensor * NewTensor4DV2(const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense 5d tensor */
XTensor * NewTensor5D(const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense 5d tensor V2 */
XTensor * NewTensor5DV2(const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense integer vector covering [lower, upper) with the given step (default type X_INT) */
XTensor * NewTensorRange(int lower, int upper, int step, const TENSOR_DATA_TYPE myDataType = X_INT, const int myDevID = -1, const bool isEnableGrad = true);
/* generate a copy of XTensor (data is copied only when isFilledData is true) */
XTensor * NewTensor(const XTensor * a, bool isFilledData = true);
/* free the data space of a given tensor */
void DelTensor(XTensor * tensor);
/* free the data space of a given tensor (allocated on the memory-pool buffer) */
void DelTensorBuf(XTensor * tensor);
} // namespace nts(NiuTrans.Tensor)
#endif // __XCALL_H__
\ No newline at end of file
......@@ -32,7 +32,7 @@
#include <stdarg.h>
#include <time.h>
#include "XTensor.h"
#include "XElement.h"
#include "XCall.h"
#include "XGlobal.h"
#include "XUtility.h"
#include "XDevice.h"
......@@ -958,55 +958,6 @@ void XTensor::SetDataPointer()
}
/*
set the cell to the ascending order along a given dimension
>> dim - the dimension specified
*/
/*
set the cells to the ascending order along a given dimension
(e.g., for a 2x3 int tensor and dim = 1, each row becomes {0, 1, 2})
>> dim - the dimension specified; a negative value means the whole tensor
         is flattened, filled with 0..unitNum-1, and then reshaped back
*/
void XTensor::SetAscendingOrder(int dim)
{
    CheckNTErrors(dim < order, "Wrong dimension specified!");
    /* only X_INT cells are supported so far */
    CheckNTErrors(dataType == X_INT, "TODO!");

    if(dim < 0){
        /* flatten to 1D, fill with 0..unitNum-1, then restore the original shape */
        int o = order;
        int ds[MAX_TENSOR_DIM_NUM];
        memcpy(ds, dimSize, sizeof(int) * order);
        Reshape(unitNum);
        SetAscendingOrder(0);
        Reshape(o, ds);
        return;
    }

    /* dimension index counted from the rightmost (innermost) dimension */
    int dimRDI = order - dim - 1;
    if(devID >= 0){
#ifdef USE_CUDA
        CudaSetAscendingOrder(this, dim);
#else
        /* fixed typo in the error message ("Plesae" -> "Please") */
        ShowNTErrors("Please specify USE_CUDA and recompile the code!");
#endif
    }
    else{
        /* stride: number of elements between consecutive items along dim */
        int stride = 1;
        int strideNum = dimSizeRDI[dimRDI];
        for(int i = 0; i < dimRDI; i++)
            stride *= dimSizeRDI[i];

        /* blockNum: number of independent slices taken along dim */
        int blockNum = 1;
        for(int i = dimRDI + 1; i < order; i++)
            blockNum *= dimSizeRDI[i];

        /* write index j into every cell of the j-th stripe of each block */
        for(int k = 0; k < blockNum; k++){
            for(int j = 0; j < strideNum; j++){
                int * d = (int*)data + stride * strideNum * k + stride * j;
                for(int i = 0; i < stride; i++)
                    d[i] = j;
            }
        }
    }
}
/*
get the value of a cell with the index
>> index - index of each dimension
>> size - size of the index
......
......@@ -37,7 +37,7 @@
#include "XDataType.h"
#include "XMem.h"
#include "XLink.h"
#include "XElement.h"
#include "XCall.h"
/* the nts (NiuTrans.Tensor) namespace */
namespace nts{
......@@ -321,9 +321,6 @@ public:
/* set the pointer to "data" */
void SetDataPointer();
/* set the cell to the ascending order along a given dimension */
void SetAscendingOrder(int dim);
/* get the value of a cell with the index */
DTYPE Get(int index[], int size = -1) const;
......
......@@ -24,6 +24,7 @@
#include "CopyBlocks.h"
#include "Gather.h"
#include "../../XName.h"
#include "../utilities/SetAscendingOrder.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
......@@ -206,7 +207,7 @@ void _CopyIndexed(const XTensor * s, XTensor * t, int dim,
const XTensor * srcIndex, int copyNum)
{
XTensor * tgtIndex = NewTensor(srcIndex);
tgtIndex->SetAscendingOrder(0);
SetAscendingOrder(*tgtIndex, 0);
_CopyIndexed(s, t, dim, srcIndex, tgtIndex, copyNum);
delete tgtIndex;
......
......@@ -23,6 +23,7 @@
#include "../../XTensor.h"
#include "../movement/CopyValues.h"
#include "../shape/IsSameShaped.h"
#include "../utilities/SetAscendingOrder.h"
#include "../../XUtility.h"
#include "../../XName.h"
#include "Sort.h"
......@@ -48,7 +49,7 @@ void _Sort(const XTensor * a, XTensor * b, XTensor * index, int dim)
int dimRDI = a->order - dim - 1;
/* make the index tensor */
index->SetAscendingOrder(dim);
SetAscendingOrder(*index, dim);
if (a->devID >= 0) {
#ifdef USE_CUDA
......
......@@ -22,6 +22,7 @@
#include "../../XDevice.h"
#include "../../XUtility.h"
#include "../../XTensor.h"
#include "../utilities/SetAscendingOrder.h"
#include "TopK.h"
#include "TopK.cuh"
#include "Sort.cuh"
......@@ -862,7 +863,7 @@ void _CudaTopK(const XTensor * a, XTensor * b, XTensor * index, int dim, int k)
//indexA->data = a->mem != NULL ? a->mem->AllocBuf(a->devID, a->unitNum * sizeof(int)) : XMemAlloc(a->devID, a->unitNum * sizeof(int));
/* make the index tensor */
//indexA->SetAscendingOrder(dim);
//SetAscendingOrder(*indexA, dim);
//_CudaSortBig(a, b, indexA, index, dim, k);
......
......@@ -39,6 +39,7 @@ bool IsFloatEqual(DTYPE a, DTYPE b, float absError, float relError)
}
/* check whether the data array is the same as the answer
>> tensor - input tensor
>> d - input data (it must be on CPUs)
>> num - number of data items
>> beg - where we start this in the data array of the tensor
......@@ -67,8 +68,13 @@ bool _CheckData(const XTensor * tensor, const void * d, int num, int beg)
return true;
}
/* check whether the data array is the same as the "answer" */
bool _CheckData(const XTensor * tensor, const void * d, int num, float tolerance, int beg)
/* check whether the data array is the same as the answer
>> tensor - input tensor
>> d - input data (it must be on CPUs)
>> num - number of data items
>> tolerance - the error we tolerate between the result and the answer
>> beg - where we start this in the data array of the tensor
*/
bool _CheckData(const XTensor * tensor, const void * d, int num, float tolerance, int beg)
{
if (tensor->data == NULL || d == NULL)
return false;
......
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2019-10-23
*/
#include "../../XTensor.h"
#include "SetAscendingOrder.cuh"
#include "SetAscendingOrder.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/*
set the cell to the ascending order along a given dimension
>> tensor - input tensor
>> dim - the dimension specified
*/
/*
set the cells to the ascending order along a given dimension
(e.g., for a 2x3 int tensor and dim = 1, each row becomes {0, 1, 2})
>> tensor - input tensor (must hold X_INT data)
>> dim - the dimension specified; a negative value means the whole tensor
         is flattened, filled with 0..unitNum-1, and then reshaped back
*/
void SetAscendingOrder(XTensor & tensor, int dim)
{
    CheckNTErrors(dim < tensor.order, "Wrong dimension specified!");
    /* only X_INT cells are supported so far */
    CheckNTErrors(tensor.dataType == X_INT, "TODO!");

    if(dim < 0){
        /* flatten to 1D, fill with 0..unitNum-1, then restore the original shape */
        int o = tensor.order;
        int ds[MAX_TENSOR_DIM_NUM];
        memcpy(ds, tensor.dimSize, sizeof(int) * tensor.order);
        tensor.Reshape(tensor.unitNum);
        SetAscendingOrder(tensor, 0);
        tensor.Reshape(o, ds);
        return;
    }

    /* dimension index counted from the rightmost (innermost) dimension */
    int dimRDI = tensor.order - dim - 1;
    if(tensor.devID >= 0){
#ifdef USE_CUDA
        CudaSetAscendingOrder(&tensor, dim);
#else
        /* fixed typo in the error message ("Plesae" -> "Please") */
        ShowNTErrors("Please specify USE_CUDA and recompile the code!");
#endif
    }
    else{
        /* stride: number of elements between consecutive items along dim */
        int stride = 1;
        int strideNum = tensor.dimSizeRDI[dimRDI];
        for(int i = 0; i < dimRDI; i++)
            stride *= tensor.dimSizeRDI[i];

        /* blockNum: number of independent slices taken along dim */
        int blockNum = 1;
        for(int i = dimRDI + 1; i < tensor.order; i++)
            blockNum *= tensor.dimSizeRDI[i];

        /* write index j into every cell of the j-th stripe of each block */
        for(int k = 0; k < blockNum; k++){
            for(int j = 0; j < strideNum; j++){
                int * d = (int*)tensor.data + stride * strideNum * k + stride * j;
                for(int i = 0; i < stride; i++)
                    d[i] = j;
            }
        }
    }
}
} // namespace nts(NiuTrans.Tensor)
\ No newline at end of file
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2019-10-23
*/
#ifndef __SETASCENDINGORDER_H__
#define __SETASCENDINGORDER_H__
#include "../../XTensor.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* set the cells to the ascending order along a given dimension
   (free-function replacement for the former XTensor::SetAscendingOrder;
   a negative dim fills the flattened tensor with 0..unitNum-1) */
void SetAscendingOrder(XTensor & tensor, int dim);
} // namespace nts(NiuTrans.Tensor)
#endif // __SETASCENDINGORDER_H__
\ No newline at end of file
......@@ -20,6 +20,7 @@
*/
#include "../core/utilities/CheckData.h"
#include "../core/utilities/SetAscendingOrder.h"
#include "TSetAscendingOrder.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
......@@ -50,7 +51,7 @@ bool TestSetAscendingOrder1()
s->SetZeroAll();
/* call SetAscendingOrder function */
s->SetAscendingOrder(1);
SetAscendingOrder(*s, 1);
/* check results */
cpuTest = _CheckData(s, answer, sUnitNum);
......@@ -66,7 +67,7 @@ bool TestSetAscendingOrder1()
sGPU->SetZeroAll();
/* call SetAscendingOrder function */
sGPU->SetAscendingOrder(1);
SetAscendingOrder(*sGPU, 1);
/* check results */
gpuTest = _CheckData(sGPU, answer, sUnitNum);
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论