Commit b7c25dee by liyinqiao

Reorganize code

1. Move the SetAscendingOrder function from XTensor.* to core/utilities/SetAscendingOrder.*
2. Rename XElement.* files to XCall.*
parent 79c659b5
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2019-10-21
*/
#include "XTensor.h"
#include "XCall.h"
#include "XDevice.h"
#include "XUtility.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/*************************************************
* we define the "new and delete" functions below
*/
/*
initialize a tensor
>> tensor - the tensor we intend to initialize
>> myOrder - order of the tensor
>> myDimSize - the size of each dimension
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDenseRatio - how often an element has a non-zero value
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site
>> myMem - memory pool used to allocate the data array
myMem = NULL means that the tensor is allocated on
the device dynamically, rather than on the memory pool
*/
void InitTensor(XTensor * tensor,
const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType,
const float myDenseRatio, const int myDevID, XMem * myMem)
{
if(myMem != NULL && tensor->mem == NULL){
tensor->mem = myMem;
tensor->devID = myMem->devID;
}
if(tensor->mem != NULL){
tensor->Resize(myOrder, myDimSize, myDataType, myDenseRatio);
}
else{
int dims[MAX_TENSOR_DIM_NUM];
memcpy(dims, myDimSize, sizeof(int) * myOrder);
bool allocated = true;
for (int i = 0; i < myOrder; i++) {
if (dims[i] < 0)
allocated = false;
}
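/* trick: dimension 0 is made negative so that Resize skips the data
   allocation; the data is then allocated explicitly below, but only if
   all the given sizes were non-negative */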
dims[0] = -abs(dims[0]);
if (myDevID == CURRENT_GPU)
tensor->devID = XDevice::GetGPUDevice();
else
tensor->devID = myDevID;
tensor->Resize(myOrder, dims, myDataType, myDenseRatio);
if(allocated)
XTensor::AllocateData(tensor);
}
}
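/* a minimal usage sketch: initialize a stack-allocated XTensor as a 2 x 3
   dense float matrix on the CPU (devID = -1) without a memory pool
   (myMem = NULL); the helper name is arbitrary */
static void InitTensorSketch()
{
    XTensor t;
    int dims[2] = {2, 3};
    InitTensor(&t, 2, dims, X_FLOAT, 1.0F, -1, NULL);
}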
/*
initialize a dense tensor V2
>> tensor - the tensor we intend to initialize
>> myOrder - order of the tensor
>> myDimSize - the size of each dimension
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site
*/
void InitTensorV2(XTensor * tensor,
const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType,
const int myDevID, const bool isEnableGrad)
{
if (tensor->mem == NULL) {
XMem * myMem = GMems.GetMem(myDevID);
tensor->mem = myMem;
tensor->devID = myMem->devID;
}
if(tensor->mem != NULL){
tensor->Resize(myOrder, myDimSize, myDataType, 1.0F);
}
else{
int dims[MAX_TENSOR_DIM_NUM];
memcpy(dims, myDimSize, sizeof(int) * myOrder);
bool allocated = true;
for (int i = 0; i < myOrder; i++) {
if (dims[i] < 0)
allocated = false;
}
dims[0] = -abs(dims[0]);
if (myDevID == CURRENT_GPU)
tensor->devID = XDevice::GetGPUDevice();
else
tensor->devID = myDevID;
tensor->Resize(myOrder, dims, myDataType, 1.0F);
if(allocated)
XTensor::AllocateData(tensor);
}
tensor->enableGrad = isEnableGrad;
}
/*
initialize a dense vector
>> tensor - the tensor we intend to initialize
>> num - number of elements
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site
>> myMem - memory pool used to allocate the data array
myMem = NULL means that the tensor is allocated on
the device dynamically, rather than on the memory pool
*/
void InitTensor1D(XTensor * tensor, const int num,
const TENSOR_DATA_TYPE myDataType, const int myDevID, XMem * myMem)
{
int dims[1];
dims[0] = num;
InitTensor(tensor, 1, dims, myDataType, 1.0F, myDevID, myMem);
}
/*
initialize a dense vector V2
>> tensor - the tensor we intend to initialize
>> num - number of elements
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site
*/
void InitTensor1DV2(XTensor * tensor, const int num,
const TENSOR_DATA_TYPE myDataType, const int myDevID, const bool isEnableGrad)
{
int dims[1];
dims[0] = num;
InitTensorV2(tensor, 1, dims, myDataType, myDevID, isEnableGrad);
}
/*
initialize a dense matrix
>> tensor - the tensor we intend to initialize
>> rowNum - number of rows
>> colNum - number of columns
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site
>> myMem - memory pool used to allocate the data array
myMem = NULL means that the tensor is allocated on
the device dynamically, rather than on the memory pool
*/
void InitTensor2D(XTensor * tensor, const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType, const int myDevID, XMem * myMem)
{
int dims[2];
dims[0] = rowNum;
dims[1] = colNum;
InitTensor(tensor, 2, dims, myDataType, 1.0F, myDevID, myMem);
}
/*
initialize a dense matrix V2
>> tensor - the tensor we intend to initialize
>> rowNum - number of rows
>> colNum - number of columns
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site
*/
void InitTensor2DV2(XTensor * tensor, const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType, const int myDevID, const bool isEnableGrad)
{
int dims[2];
dims[0] = rowNum;
dims[1] = colNum;
InitTensorV2(tensor, 2, dims, myDataType, myDevID, isEnableGrad);
}
/*
initialize a dense 3d tensor
>> tensor - the tensor we intend to initialize
>> d0 - size of dimension 0
>> d1 - size of dimension 1
>> d2 - size of dimension 2
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site
>> myMem - memory pool used to allocate the data array
myMem = NULL means that the tensor is allocated on
the device dynamically, rather than on the memory pool
*/
void InitTensor3D(XTensor * tensor, const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType, const int myDevID, XMem * myMem)
{
int dims[3];
dims[0] = d0;
dims[1] = d1;
dims[2] = d2;
InitTensor(tensor, 3, dims, myDataType, 1.0F, myDevID, myMem);
}
/*
initialize a dense 3d tensor V2
>> tensor - the tensor we intend to initialize
>> d0 - size of dimension 0
>> d1 - size of dimension 1
>> d2 - size of dimension 2
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site
*/
void InitTensor3DV2(XTensor * tensor, const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType, const int myDevID, const bool isEnableGrad)
{
int dims[3];
dims[0] = d0;
dims[1] = d1;
dims[2] = d2;
InitTensorV2(tensor, 3, dims, myDataType, myDevID, isEnableGrad);
}
/*
initialize a dense 4d tensor
>> tensor - the tensor we intend to initialize
>> d0 - size of dimension 0
>> d1 - size of dimension 1
>> d2 - size of dimension 2
>> d3 - size of dimension 3
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site
>> myMem - memory pool used to allocate the data array
myMem = NULL means that the tensor is allocated on
the device dynamically, rather than on the memory pool
*/
void InitTensor4D(XTensor * tensor, const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType, const int myDevID, XMem * myMem)
{
int dims[4];
dims[0] = d0;
dims[1] = d1;
dims[2] = d2;
dims[3] = d3;
InitTensor(tensor, 4, dims, myDataType, 1.0F, myDevID, myMem);
}
/*
initialize a dense 4d tensor V2
>> tensor - the tensor we intend to initialize
>> d0 - size of dimension 0
>> d1 - size of dimension 1
>> d2 - size of dimension 2
>> d3 - size of dimension 3
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site
*/
void InitTensor4DV2(XTensor * tensor, const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType, const int myDevID, const bool isEnableGrad)
{
int dims[4];
dims[0] = d0;
dims[1] = d1;
dims[2] = d2;
dims[3] = d3;
InitTensorV2(tensor, 4, dims, myDataType, myDevID, isEnableGrad);
}
/*
initialize a dense 5d tensor
>> tensor - the tensor we intend to initialize
>> d0 - size of dimension 0
>> d1 - size of dimension 1
>> d2 - size of dimension 2
>> d3 - size of dimension 3
>> d4 - size of dimension 4
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site
>> myMem - memory pool used to allocate the data array
myMem = NULL means that the tensor is allocated on
the device dynamically, rather than on the memory pool
*/
void InitTensor5D(XTensor * tensor, const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType, const int myDevID, XMem * myMem)
{
int dims[5];
dims[0] = d0;
dims[1] = d1;
dims[2] = d2;
dims[3] = d3;
dims[4] = d4;
InitTensor(tensor, 5, dims, myDataType, 1.0F, myDevID, myMem);
}
/*
initialize a dense 5d tensor V2
>> tensor - the tensor we intend to initialize
>> d0 - size of dimension 0
>> d1 - size of dimension 1
>> d2 - size of dimension 2
>> d3 - size of dimension 3
>> d4 - size of dimension 4
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site
*/
void InitTensor5DV2(XTensor * tensor, const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType, const int myDevID, const bool isEnableGrad)
{
int dims[5];
dims[0] = d0;
dims[1] = d1;
dims[2] = d2;
dims[3] = d3;
dims[4] = d4;
InitTensorV2(tensor, 5, dims, myDataType, myDevID, isEnableGrad);
}
/*
initialize a tensor with a reference tensor
>> tensor - the tensor we intend to initialize
>> reference - the reference tensor
*/
void InitTensor(XTensor * tensor, const XTensor * reference)
{
if(reference->order < 0)
return;
tensor->enableGrad = reference->enableGrad;
InitTensor(tensor, reference->order, reference->dimSize,
reference->dataType, reference->denseRatio,
reference->devID, reference->mem);
}
/*
initialize a tensor with a reference tensor V2
>> tensor - the tensor we intend to initialize
>> reference - the reference tensor
*/
void InitTensorV2(XTensor * tensor, const XTensor * reference)
{
if(reference->order < 0)
return;
tensor->enableGrad = reference->enableGrad;
InitTensorV2(tensor, reference->order, reference->dimSize,
reference->dataType, reference->devID);
}
/*
initialize a tensor on the CPU with a reference tensor
>> tensor - the tensor we intend to initialize
>> reference - the reference tensor
*/
void InitTensorOnCPU(XTensor * tensor, const XTensor * reference)
{
if(reference->order < 0)
return;
tensor->enableGrad = reference->enableGrad;
InitTensorV2(tensor, reference->order, reference->dimSize,
reference->dataType, -1);
}
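/* a minimal usage sketch: make b a CPU-side tensor with the same shape and
   data type as a; the helper name is arbitrary */
static void InitFromReferenceSketch(const XTensor * a)
{
    XTensor b;
    InitTensorOnCPU(&b, a);
}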
/* generate a XTensor with no initialization */
XTensor * NewTensor()
{
XTensor * tensor = new XTensor();
return tensor;
}
/*
generate a XTensor
>> myOrder - order of the tensor
>> myDimSize - the size of each dimension
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDenseRatio - how often an element has a non-zero value
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site
>> myMem - memory pool used to allocate the data array
myMem = NULL means that the tensor is allocated on
the device dynamically, rather than on the memory pool.
*/
XTensor * NewTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType,
const float myDenseRatio, const int myDevID, XMem * myMem)
{
if(myMem != NULL)
return new XTensor(myOrder, myDimSize, myDataType, myDenseRatio, myDevID, myMem);
else{
XTensor * tensor = new XTensor();
InitTensor(tensor, myOrder, myDimSize, myDataType, myDenseRatio, myDevID, myMem);
return tensor;
}
}
/*
generate a dense XTensor V2
>> myOrder - order of the tensor
>> myDimSize - the size of each dimension
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site.
*/
XTensor * NewTensorV2(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType,
const int myDevID, const bool isEnableGrad)
{
XMem * myMem = GMems.GetMem(myDevID);
XTensor * tensor = new XTensor(myOrder, myDimSize, myDataType, 1.0F, myDevID, myMem);
tensor->enableGrad = isEnableGrad;
return tensor;
}
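/* a minimal usage sketch: a 3 x 4 float matrix on the CPU with gradient
   tracking disabled; tensors returned by NewTensorV2 are heap-allocated
   and are released with DelTensor; the helper name is arbitrary */
static void NewTensorV2Sketch()
{
    int dims[2] = {3, 4};
    XTensor * a = NewTensorV2(2, dims, X_FLOAT, -1, false);
    DelTensor(a);
}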
/*
generate a XTensor which allocates data on the buffer
>> myOrder - order of the tensor
>> myDimSize - the size of each dimension
>> myMem - memory pool used to allocate the data array.
we actually allocate the data on the buffer associated with
the memory pool
>> devID - device id
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDenseRatio - how often an element has a non-zero value
*/
XTensor * NewTensorBuf(const int myOrder, const int * myDimSize,
const TENSOR_DATA_TYPE myDataType, const float myDenseRatio,
const int devID, XMem * myMem)
{
int dims[MAX_TENSOR_DIM_NUM];
memcpy(dims, myDimSize, sizeof(int) * myOrder);
dims[0] = -abs(dims[0]);
XTensor * tensor = NewTensor(myOrder, dims, myDataType, myDenseRatio, devID, myMem);
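/* debugging hook: dump the tensor when its data block happens to be exactly
   176657664 bytes */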
if (tensor->unitNum * tensor->unitSize == 176657664) {
tensor->Dump(stderr, "", 200);
}
if(myMem != NULL)
tensor->data = myMem->AllocBuf(myMem->devID, tensor->unitNum * tensor->unitSize);
else
tensor->data = XMemAlloc(devID, tensor->unitNum * tensor->unitSize);
return tensor;
}
/*
generate a dense XTensor which allocates data on the buffer V2
>> myOrder - order of the tensor
>> myDimSize - the size of each dimension
>> devID - device id
>> myDataType - data type of each element (e.g., int, float, and double)
*/
XTensor * NewTensorBufV2(const int myOrder, const int * myDimSize,
const TENSOR_DATA_TYPE myDataType, const int devID, const bool isEnableGrad)
{
int dims[MAX_TENSOR_DIM_NUM];
memcpy(dims, myDimSize, sizeof(int) * myOrder);
dims[0] = -abs(dims[0]);
XTensor * tensor = NewTensorV2(myOrder, dims, myDataType, devID, isEnableGrad);
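/* debugging hook: dump the tensor when its data block happens to be exactly
   176657664 bytes */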
if (tensor->unitNum * tensor->unitSize == 176657664) {
tensor->Dump(stderr, "", 200);
}
XMem * myMem = GMems.GetMem(devID);
tensor->data = myMem->AllocBuf(myMem->devID, tensor->unitNum * tensor->unitSize);
return tensor;
}
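/* a minimal usage sketch: a tensor created with NewTensorBufV2 borrows its
   data from the memory pool's buffer, so it is released with DelTensorBuf
   (declared in XCall.h) rather than DelTensor; the helper name is arbitrary */
static void NewTensorBufV2Sketch()
{
    int dims[2] = {3, 4};
    XTensor * t = NewTensorBufV2(2, dims, X_FLOAT, -1, false);
    /* ... use t as temporary storage ... */
    DelTensorBuf(t);
}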
/*
generate a XTensor which allocates data on the buffer
>> reference - reference tensor
>> devID - device id
>> myMem - memory pool used to allocate the data array.
we actually allocate the data on the buffer associated with
the memory pool
*/
XTensor * NewTensorBuf(const XTensor * reference, int devID, XMem * myMem)
{
return NewTensorBuf(reference->order, reference->dimSize,
reference->dataType, reference->denseRatio,
devID, myMem);
}
/*
generate a XTensor which allocates data on the buffer V2
>> reference - reference tensor
>> devID - device id
*/
XTensor * NewTensorBufV2(const XTensor * reference, int devID, const bool isEnableGrad)
{
return NewTensorBufV2(reference->order, reference->dimSize,
reference->dataType, devID, isEnableGrad);
}
/*
generate a dense vector
>> num - number of entries
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site
>> myMem - memory pool used to allocate the data array
myMem = NULL means that the tensor is allocated on
the device dynamically, rather than on the memory pool.
*/
XTensor * NewTensor1D(const int num,
const TENSOR_DATA_TYPE myDataType, const int myDevID, XMem * myMem)
{
int dims[1];
dims[0] = num;
return NewTensor(1, dims, myDataType, 1.0F, myDevID, myMem);
}
/*
generate a dense vector V2
>> num - number of entries
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site.
*/
XTensor * NewTensor1DV2(const int num,
const TENSOR_DATA_TYPE myDataType, const int myDevID, const bool isEnableGrad)
{
int dims[1];
dims[0] = num;
return NewTensorV2(1, dims, myDataType, myDevID, isEnableGrad);
}
/*
generate a dense matrix
>> rowNum - number of rows
>> colNum - number of columns
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site
>> myMem - memory pool used to allocate the data array
myMem = NULL means that the tensor is allocated on
the device dynamically, rather than on the memory pool.
*/
XTensor * NewTensor2D(const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType, const int myDevID, XMem * myMem)
{
int dims[2];
dims[0] = rowNum;
dims[1] = colNum;
return NewTensor(2, dims, myDataType, 1.0F, myDevID, myMem);
}
/*
generate a dense matrix V2
>> rowNum - number of rows
>> colNum - number of columns
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site.
*/
XTensor * NewTensor2DV2(const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType, const int myDevID, const bool isEnableGrad)
{
int dims[2];
dims[0] = rowNum;
dims[1] = colNum;
return NewTensorV2(2, dims, myDataType, myDevID, isEnableGrad);
}
/*
generate a dense 3d tensor
>> d0 - size of dimension 0
>> d1 - size of dimension 1
>> d2 - size of dimension 2
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site
>> myMem - memory pool used to allocate the data array
myMem = NULL means that the tensor is allocated on
the device dynamically, rather than on the memory pool.
*/
XTensor * NewTensor3D(const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType, const int myDevID, XMem * myMem)
{
int dims[3];
dims[0] = d0;
dims[1] = d1;
dims[2] = d2;
return NewTensor(3, dims, myDataType, 1.0F, myDevID, myMem);
}
/*
generate a dense 3d tensor V2
>> d0 - size of dimension 0
>> d1 - size of dimension 1
>> d2 - size of dimension 2
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site.
*/
XTensor * NewTensor3DV2(const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType, const int myDevID, const bool isEnableGrad)
{
int dims[3];
dims[0] = d0;
dims[1] = d1;
dims[2] = d2;
return NewTensorV2(3, dims, myDataType, myDevID, isEnableGrad);
}
/*
generate a dense 4d tensor
>> d0 - size of dimension 0
>> d1 - size of dimension 1
>> d2 - size of dimension 2
>> d3 - size of dimension 3
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site
>> myMem - memory pool used to allocate the data array
myMem = NULL means that the tensor is allocated on
the device dynamically, rather than on the memory pool.
*/
XTensor * NewTensor4D(const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType, const int myDevID, XMem * myMem)
{
int dims[4];
dims[0] = d0;
dims[1] = d1;
dims[2] = d2;
dims[3] = d3;
return NewTensor(4, dims, myDataType, 1.0F, myDevID, myMem);
}
/*
generate a dense 4d tensor V2
>> d0 - size of dimension 0
>> d1 - size of dimension 1
>> d2 - size of dimension 2
>> d3 - size of dimension 3
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site.
*/
XTensor * NewTensor4DV2(const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType, const int myDevID, const bool isEnableGrad)
{
int dims[4];
dims[0] = d0;
dims[1] = d1;
dims[2] = d2;
dims[3] = d3;
return NewTensorV2(4, dims, myDataType, myDevID, isEnableGrad);
}
/*
generate a dense 5d tensor
>> d0 - size of dimension 0
>> d1 - size of dimension 1
>> d2 - size of dimension 2
>> d3 - size of dimension 3
>> d4 - size of dimension 4
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site
>> myMem - memory pool used to allocate the data array
myMem = NULL means that the tensor is allocated on
the device dynamically, rather than on the memory pool.
*/
XTensor * NewTensor5D(const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType, const int myDevID, XMem * myMem)
{
int dims[5];
dims[0] = d0;
dims[1] = d1;
dims[2] = d2;
dims[3] = d3;
dims[4] = d4;
return NewTensor(5, dims, myDataType, 1.0F, myDevID, myMem);
}
/*
generate a dense 5d tensor V2
>> d0 - size of dimension 0
>> d1 - size of dimension 1
>> d2 - size of dimension 2
>> d3 - size of dimension 3
>> d4 - size of dimension 4
>> myDataType - data type of each element (e.g., int, float, and double)
>> myDevID - when myMem is NULL, myDevID specifies the device
on which we allocate the data on site.
*/
XTensor * NewTensor5DV2(const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType, const int myDevID, const bool isEnableGrad)
{
int dims[5];
dims[0] = d0;
dims[1] = d1;
dims[2] = d2;
dims[3] = d3;
dims[4] = d4;
return NewTensorV2(5, dims, myDataType, myDevID, isEnableGrad);
}
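/*
generate a dense vector whose cells run from lower towards upper (excluded)
with the given step
>> lower - lower bound of the range (included)
>> upper - upper bound of the range (excluded)
>> step - distance between two adjacent cells
>> myDataType - data type of each element
>> myDevID - the device on which we allocate the data
*/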
XTensor * NewTensorRange(int lower, int upper, int step, const TENSOR_DATA_TYPE myDataType, const int myDevID, const bool isEnableGrad)
{
int size = abs(upper - lower);
int unitNum = ceil(1.0 * size / abs(step));
XTensor * tensor = NewTensor1DV2(unitNum, myDataType, myDevID, isEnableGrad);
tensor->Range(lower, upper, step);
return tensor;
}
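/* a minimal usage sketch: with the default X_INT data type,
   NewTensorRange(0, 10, 2) yields a 5-cell vector holding {0, 2, 4, 6, 8}
   (assuming Range fills the half-open interval [lower, upper)), since
   unitNum = ceil((10 - 0) / 2) = 5; the helper name is arbitrary */
static void NewTensorRangeSketch()
{
    XTensor * r = NewTensorRange(0, 10, 2);
    DelTensor(r);
}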
/*
generate a copy of XTensor
>> a - the tensor we copy from
>> isFilledData - indicates whether we allocate the data for
the newly-generated tensor
*/
XTensor * NewTensor(const XTensor * a, bool isFilledData)
{
int dims[MAX_TENSOR_DIM_NUM];
CheckNTErrors((a != NULL), "Empty input!");
memset(dims, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
if(a->order > 0)
memcpy(dims, a->dimSize, sizeof(int) * a->order);
if(!isFilledData)
dims[0] = -dims[0];
XTensor * newTensor = new XTensor(a->order, dims,
a->dataType, a->denseRatio,
a->devID, a->mem);
return newTensor;
}
/*
free the data space of a given tensor
>> tensor - pointer to the tensor
*/
void DelTensor(XTensor * tensor)
{
delete tensor;
}
/*
free the data space of a given tensor (on the buffer)
>> tensor - pointer to the tensor
*/
void DelTensorBuf(XTensor * tensor)
{
if(tensor->mem != NULL)
tensor->mem->ReleaseBuf(tensor->devID, tensor->unitNum * tensor->unitSize);
else
XMemFree(tensor->devID, tensor->data);
tensor->data = NULL;
delete tensor;
}
} // namespace nts(NiuTrans.Tensor)
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2019-10-21
*/
#ifndef __XCALL_H__
#define __XCALL_H__
#include "XTensor.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/*
* we define the "new and delete" functions below
*/
/* initialize a XTensor */
void InitTensor(XTensor * tensor,
const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const float myDenseRatio = 1.0F, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense XTensor V2 */
void InitTensorV2(XTensor * tensor,
const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense vector */
void InitTensor1D(XTensor * tensor, const int num,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense vector V2 */
void InitTensor1DV2(XTensor * tensor, const int num,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense matrix */
void InitTensor2D(XTensor * tensor, const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense matrix V2 */
void InitTensor2DV2(XTensor * tensor, const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense 3d tensor */
void InitTensor3D(XTensor * tensor, const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense 3d tensor V2 */
void InitTensor3DV2(XTensor * tensor, const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense 4d tensor */
void InitTensor4D(XTensor * tensor, const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense 4d tensor V2 */
void InitTensor4DV2(XTensor * tensor, const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a dense 5d tensor */
void InitTensor5D(XTensor * tensor, const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
/* initialize a dense 5d tensor V2 */
void InitTensor5DV2(XTensor * tensor, const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* initialize a tensor with a reference tensor */
void InitTensor(XTensor * tensor, const XTensor * reference);
/* initialize a tensor with a reference tensor */
void InitTensorV2(XTensor * tensor, const XTensor * reference);
/* initialize a tensor on the CPU with a reference tensor */
void InitTensorOnCPU(XTensor * tensor, const XTensor * reference);
/* generate a XTensor with no initialization */
XTensor * NewTensor();
/* generate a XTensor */
XTensor * NewTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const float myDenseRatio = 1.0F, const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense XTensor V2 */
XTensor * NewTensorV2(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a XTensor which allocates data on the buffer */
XTensor * NewTensorBuf(const int myOrder, const int * myDimSize,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const float myDenseRatio = 1.0F,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense XTensor which allocates data on the buffer V2 */
XTensor * NewTensorBufV2(const int myOrder, const int * myDimSize,
const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* generate a XTensor which allocates data on the buffer */
XTensor * NewTensorBuf(const XTensor * reference, int devID, XMem * myMem);
/* generate a XTensor which allocates data on the buffer V2 */
XTensor * NewTensorBufV2(const XTensor * reference, int devID, const bool isEnableGrad = true);
/* generate a dense vector */
XTensor * NewTensor1D(const int num, const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1,
XMem * myMem = NULL);
/* generate a dense vector V2 */
XTensor * NewTensor1DV2(const int num, const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense matrix */
XTensor * NewTensor2D(const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense matrix V2 */
XTensor * NewTensor2DV2(const int rowNum, const int colNum,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense 3d tensor */
XTensor * NewTensor3D(const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense 3d tensor V2 */
XTensor * NewTensor3DV2(const int d0, const int d1, const int d2,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense 4d tensor */
XTensor * NewTensor4D(const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense 4d tensor V2 */
XTensor * NewTensor4DV2(const int d0, const int d1, const int d2, const int d3,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense 5d tensor */
XTensor * NewTensor5D(const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, XMem * myMem = NULL);
/* generate a dense 5d tensor V2 */
XTensor * NewTensor5DV2(const int d0, const int d1, const int d2, const int d3, const int d4,
const TENSOR_DATA_TYPE myDataType = X_FLOAT,
const int myDevID = -1, const bool isEnableGrad = true);
/* generate a dense vector by range */
XTensor * NewTensorRange(int lower, int upper, int step, const TENSOR_DATA_TYPE myDataType = X_INT, const int myDevID = -1, const bool isEnableGrad = true);
/* generate a copy of XTensor (with a reference to a given tensor) */
XTensor * NewTensor(const XTensor * a, bool isFilledData = true);
/* free the data space of a given tensor */
void DelTensor(XTensor * tensor);
/* free the data space of a given tensor (on the buffer) */
void DelTensorBuf(XTensor * tensor);
} // namespace nts(NiuTrans.Tensor)
#endif // __XCALL_H__
\ No newline at end of file
@@ -32,7 +32,7 @@
 #include <stdarg.h>
 #include <time.h>
 #include "XTensor.h"
-#include "XElement.h"
+#include "XCall.h"
 #include "XGlobal.h"
 #include "XUtility.h"
 #include "XDevice.h"
@@ -958,55 +958,6 @@ void XTensor::SetDataPointer()
 }
 
 /*
-set the cell to the ascending order along a given dimension
->> dim - the dimension specified
-*/
-void XTensor::SetAscendingOrder(int dim)
-{
-    CheckNTErrors(dim < order, "Wrong dimension specified!");
-    CheckNTErrors(dataType == X_INT, "TODO!");
-
-    if(dim < 0){
-        int o = order;
-        int ds[MAX_TENSOR_DIM_NUM];
-        memcpy(ds, dimSize, sizeof(int) * order);
-        Reshape(unitNum);
-        SetAscendingOrder(0);
-        Reshape(o, ds);
-        return;
-    }
-
-    int dimRDI = order - dim - 1;
-    if(devID >= 0){
-#ifdef USE_CUDA
-        CudaSetAscendingOrder(this, dim);
-#else
-        ShowNTErrors("Please specify USE_CUDA and recompile the code!");
-#endif
-    }
-    else{
-        int stride = 1;
-        int strideNum = dimSizeRDI[dimRDI];
-        for(int i = 0; i < dimRDI; i++)
-            stride *= dimSizeRDI[i];
-
-        int blockNum = 1;
-        for(int i = dimRDI + 1; i < order; i++)
-            blockNum *= dimSizeRDI[i];
-
-        for(int k = 0; k < blockNum; k++){
-            for(int j = 0; j < strideNum; j++){
-                int * d = (int*)data + stride * strideNum * k + stride * j;
-                for(int i = 0; i < stride; i++)
-                    d[i] = j;
-            }
-        }
-    }
-}
-
-/*
 get the value of a cell with the index
 >> index - index of each dimension
 >> size - size of the index
@@ -37,7 +37,7 @@
 #include "XDataType.h"
 #include "XMem.h"
 #include "XLink.h"
-#include "XElement.h"
+#include "XCall.h"
 
 /* the nts (NiuTrans.Tensor) namespace */
 namespace nts{
@@ -321,9 +321,6 @@ public:
     /* set the pointer to "data" */
    void SetDataPointer();
 
-    /* set the cell to the ascending order along a given dimension */
-    void SetAscendingOrder(int dim);
-
     /* get the value of a cell with the index */
    DTYPE Get(int index[], int size = -1) const;
@@ -24,6 +24,7 @@
 #include "CopyBlocks.h"
 #include "Gather.h"
 #include "../../XName.h"
+#include "../utilities/SetAscendingOrder.h"
 
 namespace nts { // namespace nts(NiuTrans.Tensor)
 
@@ -206,7 +207,7 @@ void _CopyIndexed(const XTensor * s, XTensor * t, int dim,
                   const XTensor * srcIndex, int copyNum)
 {
     XTensor * tgtIndex = NewTensor(srcIndex);
-    tgtIndex->SetAscendingOrder(0);
+    SetAscendingOrder(*tgtIndex, 0);
 
     _CopyIndexed(s, t, dim, srcIndex, tgtIndex, copyNum);
 
     delete tgtIndex;
@@ -23,6 +23,7 @@
 #include "../../XTensor.h"
 #include "../movement/CopyValues.h"
 #include "../shape/IsSameShaped.h"
+#include "../utilities/SetAscendingOrder.h"
 #include "../../XUtility.h"
 #include "../../XName.h"
 #include "Sort.h"
@@ -48,7 +49,7 @@ void _Sort(const XTensor * a, XTensor * b, XTensor * index, int dim)
     int dimRDI = a->order - dim - 1;
 
     /* make the index tensor */
-    index->SetAscendingOrder(dim);
+    SetAscendingOrder(*index, dim);
 
     if (a->devID >= 0) {
 #ifdef USE_CUDA
@@ -22,6 +22,7 @@
 #include "../../XDevice.h"
 #include "../../XUtility.h"
 #include "../../XTensor.h"
+#include "../utilities/SetAscendingOrder.h"
 #include "TopK.h"
 #include "TopK.cuh"
 #include "Sort.cuh"
@@ -862,7 +863,7 @@ void _CudaTopK(const XTensor * a, XTensor * b, XTensor * index, int dim, int k)
     //indexA->data = a->mem != NULL ? a->mem->AllocBuf(a->devID, a->unitNum * sizeof(int)) : XMemAlloc(a->devID, a->unitNum * sizeof(int));
 
     /* make the index tensor */
-    //indexA->SetAscendingOrder(dim);
+    //SetAscendingOrder(*indexA, dim);
 
     //_CudaSortBig(a, b, indexA, index, dim, k);
@@ -39,6 +39,7 @@ bool IsFloatEqual(DTYPE a, DTYPE b, float absError, float relError)
 }
 
 /* check whether the data array is the same as the answer
+>> tensor - input tensor
 >> d - input data (it must be on CPUs)
 >> num - number of data items
 >> beg - where we start this in the data array of the tensor
@@ -67,8 +68,13 @@ bool _CheckData(const XTensor * tensor, const void * d, int num, int beg)
     return true;
 }
 
-/* check whether the data array is the same as the "answer" */
+/* check whether the data array is the same as the answer
+>> tensor - input tensor
+>> d - input data (it must be on CPUs)
+>> num - number of data items
+>> tolerance - the error we tolerate between the result and the answer
+>> beg - where we start this in the data array of the tensor
+*/
 bool _CheckData(const XTensor * tensor, const void * d, int num, float tolerance, int beg)
 {
     if (tensor->data == NULL || d == NULL)
         return false;
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2019-10-23
*/
#include "../../XTensor.h"
#include "SetAscendingOrder.cuh"
#include "SetAscendingOrder.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/*
set the cell to the ascending order along a given dimension
>> tensor - input tensor
>> dim - the dimension specified
*/
void SetAscendingOrder(XTensor & tensor, int dim)
{
CheckNTErrors(dim < tensor.order, "Wrong dimension specified!");
CheckNTErrors(tensor.dataType == X_INT, "TODO!");
if(dim < 0){
int o = tensor.order;
int ds[MAX_TENSOR_DIM_NUM];
memcpy(ds, tensor.dimSize, sizeof(int) * tensor.order);
tensor.Reshape(tensor.unitNum);
SetAscendingOrder(tensor, 0);
tensor.Reshape(o, ds);
return;
}
int dimRDI = tensor.order - dim - 1;
if(tensor.devID >= 0){
#ifdef USE_CUDA
CudaSetAscendingOrder(&tensor, dim);
#else
ShowNTErrors("Plesae specify USE_CUDA and recompile the code!");
#endif
}
else{
int stride = 1;
int strideNum = tensor.dimSizeRDI[dimRDI];
for(int i = 0; i < dimRDI; i++)
stride *= tensor.dimSizeRDI[i];
int blockNum = 1;
for(int i = dimRDI + 1; i < tensor.order; i++)
blockNum *= tensor.dimSizeRDI[i];
for(int k = 0; k < blockNum; k++){
for(int j = 0; j < strideNum; j++){
int * d = (int*)tensor.data + stride * strideNum * k + stride * j;
for(int i = 0; i < stride; i++)
d[i] = j;
}
}
}
}
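/* a worked sketch: on a 2 x 4 int matrix, setting the ascending order along
   dimension 1 writes the index along that dimension into every cell, so both
   rows become {0, 1, 2, 3}; this follows the CPU path above with stride = 1,
   strideNum = 4 and blockNum = 2; the helper name is arbitrary */
static void SetAscendingOrderSketch()
{
    XTensor * s = NewTensor2DV2(2, 4, X_INT, -1, false);
    SetAscendingOrder(*s, 1);
    DelTensor(s);
}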
} // namespace nts(NiuTrans.Tensor)
\ No newline at end of file
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2019-10-23
*/
#ifndef __SETASCENDINGORDER_H__
#define __SETASCENDINGORDER_H__
#include "../../XTensor.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* set the cell to the ascending order along a given dimension */
void SetAscendingOrder(XTensor & tensor, int dim);
} // namespace nts(NiuTrans.Tensor)
#endif // __SETASCENDINGORDER_H__
\ No newline at end of file
@@ -20,6 +20,7 @@
 */
 
 #include "../core/utilities/CheckData.h"
+#include "../core/utilities/SetAscendingOrder.h"
 #include "TSetAscendingOrder.h"
 
 namespace nts { // namespace nts(NiuTrans.Tensor)
@@ -50,7 +51,7 @@ bool TestSetAscendingOrder1()
     s->SetZeroAll();
 
     /* call SetAscendingOrder function */
-    s->SetAscendingOrder(1);
+    SetAscendingOrder(*s, 1);
 
     /* check results */
     cpuTest = _CheckData(s, answer, sUnitNum);
@@ -66,7 +67,7 @@ bool TestSetAscendingOrder1()
     sGPU->SetZeroAll();
 
     /* call SetAscendingOrder function */
-    sGPU->SetAscendingOrder(1);
+    SetAscendingOrder(*sGPU, 1);
 
     /* check results */
     gpuTest = _CheckData(sGPU, answer, sUnitNum);