Commit 9b11391e by liyinqiao

1. Merge with xiao branch; 2. Update doc

parent 1410c491
@@ -20,7 +20,7 @@
 */
 #include <stdio.h>
-#include "../tensor/XTensor.h"
+#include "XNet.h"
 //#define CRTDBG_MAP_ALLOC
 //#include <stdlib.h>
...
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2018, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (xiaotong@mail.neu.edu.cn) 2018-07-12
*/
#include "XNet.h"
namespace nts{
}
\ No newline at end of file
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2018, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (xiaotong@mail.neu.edu.cn) 2018-07-12
* We expected a heavy rain today but a drizzle came down. Should I
* take a big umbrella?
*/
#include "../tensor/XTensor.h"
#include "../tensor/function/FHeader.h"
#ifndef __XNET_H__
#define __XNET_H__
namespace nts{
/* management of tensor net (or graph) */
class XNet
{
public:
/* backward propagation to obtain gradient wrt. the loss/error function */
void Backward(XTensor &root, XTensor &gold = NULLTensor, LOSS_FUNCTION_NAME loss = NOLOSS);
/* backward propagation to obtain gradient wrt. the loss/error function
with a number of root nodes */
void Backward(XList &roots, XList &golds = NULLList, LOSS_FUNCTION_NAME loss = NOLOSS);
};
}
#endif
\ No newline at end of file
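
A minimal sketch of how the Backward interface above might be driven. Note that XNet.cpp is still an empty namespace at this commit, so this is purely illustrative, and the CROSSENTROPY loss name is an assumption about LOSS_FUNCTION_NAME rather than something shown in this diff:

    #include "XNet.h"
    using namespace nts;

    void TinyBackwardDemo()
    {
        XTensor x;
        XTensor gold;
        XTensor y;
        InitTensor2D(&x, 2, 2);
        InitTensor2D(&gold, 2, 2);
        x.SetZeroAll();
        gold.SetZeroAll();

        /* build a small computation graph, as in SmallTest below */
        y = Sum(x, Multiply(x, x));

        /* gradients wrt. the loss, once Backward is implemented */
        XNet net;
        net.Backward(y, gold, CROSSENTROPY);
    }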
@@ -42,11 +42,14 @@ void SmallTest();
 int main( int argc, const char ** argv )
 {
-    //_CrtSetBreakAlloc(78);
+    //_CrtSetBreakAlloc(123);
 
     /* a tiny test */
-    //if(1)
-    //    SmallTest();
+    if(false)
+        SmallTest();
+
+    //_CrtDumpMemoryLeaks();
+    //return 0;
 
     if(argc > 1 && !strcmp(argv[1], "-test"))
         Test();
@@ -68,25 +71,30 @@ void SmallTest()
 {
     XTensor a;
     XTensor b;
+    XTensor c;
+    XTensor d;
 
     InitTensor2D(&a, 2, 2);
+    InitTensor2D(&b, 2, 2);
     a.SetZeroAll();
+    b.SetZeroAll();
     a.Set2D(1.0F, 0, 0);
     a.Set2D(2.0F, 1, 1);
 
     b = Sum(a, Multiply(a, a));
-    XTensor c = a * b + a;
-    int nnn = 1;
-    XTensor d = a + b + c.Lin(0.5F);
+
+    /* this is prohibited !!!!!!!!!!!!! */
+    //XTensor c = a * b + a;
+    //XTensor d = a + b + c.Lin(0.5F);
+
+    c = a * b + a;
+    d = a + b + c.Lin(0.5F);
 
     XLink::CheckNetwork(&d);
-    XLink::ShowNetwork(stderr, &b);
+    XLink::ShowNetwork(stderr, &d);
 
-    a.Dump(stderr, "a: ");
-    b.Dump(stderr, "b: ");
-    c.Dump(stderr, "c: ");
-    d.Dump(stderr, "d: ");
+    a.Dump(stderr, "a:");
+    b.Dump(stderr, "b:");
+    c.Dump(stderr, "c:");
+    d.Dump(stderr, "d:");
 }
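
The rewritten SmallTest encodes a usage rule for the graph-building operators: declare result tensors first and assign into them, rather than copy-constructing them from an expression. A hedged restatement of the rule (the reason given here is inferred from the destructor change later in this commit, not stated in the test itself):

    XTensor a, b, c;
    InitTensor2D(&a, 2, 2);
    InitTensor2D(&b, 2, 2);

    b = Sum(a, Multiply(a, a));

    //XTensor bad = a * b + a;  // prohibited: copy-constructing from the
                                // expression temporary ties the new tensor's
                                // lifetime to the graph in an unsafe way
    c = a * b + a;              // allowed: operator= hard-copies the data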
@@ -42,7 +42,9 @@ XLink::XLink()
 /* destructor */
 XLink::~XLink()
 {
-    delete[] tails;
-    delete[] (char*)params;
+    if(tails != NULL)
+        delete[] tails;
+    if(params != NULL)
+        delete[] (char*)params;
 }
@@ -75,6 +77,39 @@ void XLink::ClearTail()
 }
 
 /*
+clear the outgoing node list of tensor node
+>> node - the node to be cleared
+*/
+void XLink::ClearOutgoing(XTensor * node)
+{
+    if(node == NULL)
+        return;
+
+    XLink &outgo = node->outgo;
+
+    for(int i = 0; i < outgo.tailNum; i++){
+
+        /* for each parent node */
+        XTensor * parent = outgo.tails[i];
+        XLink &parentIncome = parent->income;
+
+        CheckNTErrors(parentIncome.tailNum > 0, "The node must have incoming edges!");
+
+        /* we check for each parent node and remove the link to current node */
+        for(int j = 0; j < parentIncome.tailNum; j++){
+            if(parentIncome.tails[j] == node){
+                memcpy(parentIncome.tails + j, parentIncome.tails + j + 1,
+                       sizeof(XTensor*) * (parentIncome.tailNum - 1 - j));
+                parentIncome.tailNum--;
+                break;
+            }
+        }
+    }
+
+    outgo.ClearTail();
+}
+
+/*
 clear the incoming node list of tensor node
 >> node - the node to be cleared
 */
@@ -87,7 +122,7 @@ void XLink::ClearIncoming(XTensor * node)
     for(int i = 0; i < income.tailNum; i++){
 
-        /* for a incoming node */
+        /* for each incoming node */
         XTensor * child = income.tails[i];
         XLink &childOutgo = child->outgo;
@@ -96,9 +131,8 @@ void XLink::ClearIncoming(XTensor * node)
         /* we check for each child node and remove the link to current node */
         for(int j = 0; j < childOutgo.tailNum; j++){
             if(childOutgo.tails[j] == node){
-                memcpy(childOutgo.tails + j,
-                       childOutgo.tails + j + 1,
-                       (childOutgo.tailNum - 1 - j) * sizeof(XTensor*));
+                memcpy(childOutgo.tails + j, childOutgo.tails + j + 1,
+                       sizeof(XTensor*) * (childOutgo.tailNum - 1 - j));
                 childOutgo.tailNum--;
                 break;
             }
@@ -109,7 +143,6 @@ void XLink::ClearIncoming(XTensor * node)
     }
 
     income.ClearTail();
-    income.tailNum = 0;
 }
 /*
@@ -239,6 +272,7 @@ void XLink::MakeLink(XList * list, XTensor * h, int id)
         XLink &outgo = t->outgo;
         CheckNTErrors(outgo.head == NULL || outgo.head == t,
                       "Wrong head of the hyperedge!");
+        outgo.SetHead(t);
         outgo.AddTail(h);
     }
 }
@@ -277,16 +311,21 @@ void XLink::Replace(const XTensor * oldOne, XTensor * newOne)
     if(oldOne == NULL || newOne == NULL)
         return;
 
-    XLink::ClearIncoming(newOne);
-
     XLink &newIncome = newOne->income;
     XLink &newOutgo = newOne->outgo;
 
-    delete[] newIncome.tails;
+    XLink::ClearOutgoing(newOne);
+    XLink::ClearIncoming(newOne);
+
+    if(newIncome.tailNum < oldOne->income.tailNum){
+        delete[] newIncome.tails;
+        newIncome.tails = new XTensor*[oldOne->income.tailNum];
+    }
 
-    /* incoming nodes for the new node */
+    /* incoming nodes */
+    newIncome.SetType(oldOne->income.typeID);
     newIncome.head = newOne;
     newIncome.tailNum = oldOne->income.tailNum;
-    newIncome.tails = new XTensor*[newIncome.tailNum];
     memcpy(newIncome.tails, oldOne->income.tails, sizeof(XTensor*) * newIncome.tailNum);
 
     /* update the link to each child node */
@@ -307,10 +346,14 @@ void XLink::Replace(const XTensor * oldOne, XTensor * newOne)
         }
     }
 
-    /* outgoing nodes for the new node */
+    if(newOutgo.tailNum < oldOne->outgo.tailNum){
+        delete[] newOutgo.tails;
+        newOutgo.tails = new XTensor*[oldOne->outgo.tailNum];
+    }
+
+    /* outgoing nodes */
     newOutgo.head = newOne;
     newOutgo.tailNum = oldOne->outgo.tailNum;
-    newOutgo.tails = new XTensor*[newOutgo.tailNum];
     memcpy(newOutgo.tails, oldOne->outgo.tails, sizeof(XTensor*) * newOutgo.tailNum);
 
     /* update the link to each parent node */
@@ -385,7 +428,6 @@ void XLink::CheckNetwork(XTensor * root)
             }
             CheckNTErrors(hit, "Wrong outgoing edge!");
         }
     }
-
     XLink &outgo = root->outgo;
@@ -397,15 +439,15 @@ void XLink::CheckNetwork(XTensor * root)
         XTensor * parent = outgo.tails[i];
         if(parent == NULL)
             continue;
-        XLink & parentOutgo = parent->outgo;
+        XLink & parentIncome = parent->income;
         bool hit = false;
-        for(int j = 0; j < parentOutgo.tailNum; j++){
-            if(parentOutgo.tails[j] == root){
+        for(int j = 0; j < parentIncome.tailNum; j++){
+            if(parentIncome.tails[j] == root){
                 hit = true;
                 break;
             }
         }
-        CheckNTErrors(hit, "Wrong outgoing edge!");
+        CheckNTErrors(hit, "Wrong incoming edge!");
     }
 }
@@ -429,7 +471,7 @@ void XLink::ShowNetwork(FILE * file, XTensor * root)
         fprintf(file, "income[%d]: null ", income.tailNum);
     }
     else{
-        fprintf(file, "income[%d]: ", income.tailNum);
+        fprintf(file, "income[%d, %s]: ", income.tailNum, GetOPName(income.typeID));
         for(int i = 0; i < income.tailNum; i++){
             XTensor * child = income.tails[i];
             if(child == NULL)
@@ -438,13 +480,14 @@ void XLink::ShowNetwork(FILE * file, XTensor * root)
             fprintf(file, "%d ", child->id);
         }
     }
+    fprintf(stderr, ", ");
 
     XLink &outgo = root->outgo;
-    if(outgo.head == NULL){
+    if(outgo.head == NULL || outgo.tailNum == 0){
         fprintf(file, "outgo[%d]: null ", outgo.tailNum);
     }
     else{
-        fprintf(file, "outgo[%d]: ", income.tailNum);
+        fprintf(file, "outgo[%d]: ", outgo.tailNum);
         for(int i = 0; i < outgo.tailNum; i++){
             XTensor * parent = outgo.tails[i];
             if(parent == NULL)
...
@@ -96,6 +96,10 @@ struct XLink
     static
     void ClearIncoming(XTensor * node);
 
+    /* clear the outgoing node list of tensor node */
+    static
+    void ClearOutgoing(XTensor * node);
+
     /* set edge type id and name */
     void SetType(int id);
...
@@ -42,6 +42,8 @@
 /* the nts (NiuTrans.Tensor) namespace */
 namespace nts{
 
+XList NULLList;
+
 /* constructor */
 XList::XList()
 {
...
@@ -96,6 +96,8 @@ public:
 };
 
+extern XList NULLList;
+
 }
 /* end of the nts (NiuTrans.Tensor) namespace */
...
@@ -28,7 +28,7 @@
 namespace nts { // namespace nts(NiuTrans.Tensor)
 
-#define MATH_ARITHMETIC 10000
+#define MATH_ARITHMETIC 0x00001000
 #define MATH_SUM MATH_ARITHMETIC + 1
 #define MATH_MULTIPLY MATH_SUM + 1
 #define MATH_SCALEANDSHIFT MATH_MULTIPLY + 1
...
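
A side note on the constant change above: moving the base ID from decimal 10000 to 0x00001000 reads like reserving a hexadecimal block per operator family. A quick self-contained check of what the macros expand to:

    #include <cstdio>

    #define MATH_ARITHMETIC    0x00001000
    #define MATH_SUM           MATH_ARITHMETIC + 1
    #define MATH_MULTIPLY      MATH_SUM + 1
    #define MATH_SCALEANDSHIFT MATH_MULTIPLY + 1

    int main()
    {
        /* prints 0x1000 0x1001 0x1002 0x1003 */
        printf("%#x %#x %#x %#x\n",
               MATH_ARITHMETIC, MATH_SUM, MATH_MULTIPLY, MATH_SCALEANDSHIFT);
        return 0;
    }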
@@ -61,7 +61,7 @@ namespace nts{
 int tensorIDGlobal = 0;
 MUTEX_HANDLE tensorMutex;
-XTensor firstTensor;
+XTensor NULLTensor;
 
 /* generate a tensor id */
 int MakeTensorID()
@@ -83,24 +83,10 @@ constructor
 */
 XTensor::XTensor()
 {
-    memset(this, 0, sizeof(XTensor));
+    Init();
     SetDataPointer();
     id = MakeTensorID();
 
-    order = -1;
-    memset(dimSize, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
-    memset(dimSizeRDI, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
-    memset(isAllValued, 0, sizeof(bool) * MAX_TENSOR_DIM_NUM);
-    dataType = DEFAULT_DTYPE;
-    devID = -1;
-    unitSize = sizeof(float);
-    unitNum = 0;
-    unitNumNonZero = 0;
-    isSparse = false;
-    isShared = false;
-    denseRatio = 1.0F;
     isDefaultDType = true;
     isInGlobalMem = false;
     isInit = false;
@@ -110,16 +96,9 @@ XTensor::XTensor()
 /* constructor */
 XTensor::XTensor(const XTensor * reference)
 {
-    memset(this, 0, sizeof(XTensor));
+    Init();
     SetDataPointer();
     id = MakeTensorID();
-
-    dataType = DEFAULT_DTYPE;
-    devID = -1;
-    denseRatio = 1.0F;
-    isDefaultDType = true;
-    isInit = false;
-    isTmp = false;
 
     InitTensor(this, reference);
 }
@@ -127,36 +106,20 @@ XTensor::XTensor(const XTensor * reference)
 /*
 constructor
 >> myOrder - order of the tensor
->> myDevID - prefered device id
+>> myDevID - device id
 >> myMem - memory pool used to allocate the data array
 */
 XTensor::XTensor(const int myOrder, int myDevID, XMem * myMem)
 {
     CheckNTErrors((myOrder > 0), "Illegal tensor order1");
 
+    Init();
     SetDataPointer();
     id = MakeTensorID();
 
     order = myOrder;
-    memset(dimSize, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
-    memset(dimSizeRDI, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
-    memset(isAllValued, 0, sizeof(bool) * MAX_TENSOR_DIM_NUM);
     mem = myMem;
-    data = NULL;
-    dataHost = NULL;
-    dataType = DEFAULT_DTYPE;
     devID = myMem == NULL ? myDevID : myMem->devID;
-    unitSize = sizeof(float);
-    unitNum = 0;
-    unitNumNonZero = 0;
-    isSparse = false;
-    isShared = false;
-    denseRatio = 1.0F;
-    isDefaultDType = true;
-    isInGlobalMem = false;
-    isInit = false;
-    isTmp = false;
 }
 /*
@@ -165,31 +128,21 @@ constructor
 >> myDimSize - the size of each dimension
 >> myDataType - unit size (e.g., int, float, and double)
 >> myDenseRatio - how often an element has non-zero value
+>> myDevID - device id
 >> myMem - memory pool used to allocate the data array
 */
 XTensor::XTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType,
-                 const float myDenseRatio, XMem * myMem)
+                 const float myDenseRatio, int myDevID, XMem * myMem)
 {
     CheckNTErrors((myOrder > 0), "Illegal tensor order1");
 
+    Init();
     SetDataPointer();
     id = MakeTensorID();
 
     order = myOrder;
-    memset(dimSize, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
-    memset(dimSizeRDI, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
-    memset(isAllValued, 0, sizeof(bool) * MAX_TENSOR_DIM_NUM);
     mem = myMem;
-    data = NULL;
-    dataHost = NULL;
-    dataType = DEFAULT_DTYPE;
-    devID = myMem == NULL ? -1 : myMem->devID;
-    isShared = false;
-    isDefaultDType = true;
-    isInGlobalMem = false;
-    isInit = false;
-    isTmp = false;
+    devID = myMem != NULL ? myMem->devID : myDevID;
 
     Resize(myOrder, myDimSize, myDataType, myDenseRatio);
 }
@@ -197,6 +150,7 @@ XTensor::XTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYP
 /* copy constructor */
 XTensor::XTensor(const XTensor &reference)
 {
+    Init();
     SetDataPointer();
     id = MakeTensorID();
     ShallowCopy(reference);
@@ -229,18 +183,60 @@ XTensor::XTensor(const XTensor &reference)
         XLink::CopyIncoming(&reference, this);
     }
 
-    isInit = false;
-    isTmp = false;
+    isInit = true;
+    isTmp = reference.isTmp;
 }
 /* destructor */
 XTensor::~XTensor()
 {
+    /* We make a hard copy of the tensor to keep
+       the connectivity of the graph. To avoid memory
+       leaks, we release the data of the new tensor
+       when its parent is deleted (see ClearIncoming). */
+    if(isTmp && outgo.tailNum > 0){
+        int dims[MAX_TENSOR_DIM_NUM];
+        memcpy(dims, dimSize, order * sizeof(int));
+        dims[0] = -dims[0];
+
+        XTensor * newTensor = new XTensor(order, dims, dataType, denseRatio, devID, mem);
+        newTensor->SetTMP();
+        newTensor->data = data;
+        data = NULL;
+
+        XLink::Replace(this, newTensor);
+    }
+
+    XLink::ClearOutgoing(this);
+    XLink::ClearIncoming(this);
+
     DestroyData();
-    data = NULL;
-    dataHost = NULL;
-    mem = NULL;
-    XLink::ClearIncoming(this);
 }
+
+/* initialize member variables */
+void XTensor::Init()
+{
+    id = -1;
+    mem = NULL;
+    data = NULL;
+    dataHost = NULL;
+    dataP = NULL;
+    devID = -1;
+    order = -1;
+    memset(dimSize, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
+    memset(dimSizeRDI, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
+    dataType = DEFAULT_DTYPE;
+    unitSize = sizeof(float);
+    unitNum = 0;
+    isSparse = false;
+    unitNumNonZero = 0;
+    denseRatio = 1.0F;
+    isShared = false;
+    isDefaultDType = true;
+    isInGlobalMem = false;
+    memset(isAllValued, 0, sizeof(bool) * MAX_TENSOR_DIM_NUM);
+    isInit = false;
+    isTmp = false;
+}
 
 /* delete data arrays */
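
The destructor change above is the heart of this commit: a temporary tensor that still has parents is not torn down outright; its data is handed to a hard copy that takes its place in the graph. A sketch of the situation it guards against (illustrative only, built from the API in this commit):

    void GraphLifetimeDemo()
    {
        XTensor a;
        XTensor d;
        InitTensor2D(&a, 2, 2);

        /* Multiply(a, a) yields a temporary node t; the node of d
           records an incoming edge from t */
        d = Sum(a, Multiply(a, a));

        /* when t goes out of scope, ~XTensor() sees isTmp && outgo.tailNum > 0,
           moves t's data into a replacement node via XLink::Replace, and the
           graph rooted at d stays connected; the replacement's data is freed
           when its parent dies (see ClearIncoming) */
    }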
@@ -284,7 +280,7 @@ void XTensor::ShallowCopy(const XTensor &tensor)
 /* overloading of the equal-sign */
 XTensor& XTensor::operator= (const XTensor& tensor)
 {
-    /* hard copy of data array */
+    /* hard copy of the data array */
     int size = unitNum * unitSize;
     if( isInit && !isSparse && !tensor.isSparse &&
         size == tensor.unitNum * tensor.unitSize &&
@@ -1345,16 +1341,25 @@ void XTensor::Dump(FILE * file, const char * label, const int n, const int verbo
     if (label != NULL)
         fprintf(file, "%s ", label);
 
+    if(isInit){
         fprintf(file, "order=%d dimsize=", order);
         for (int i = 0; i < order; i++) {
             fprintf(file, "%d", dimSize[i]);
             if (i < order - 1)
                 fprintf(file, ",");
         }
+    }
+    else{
+        fprintf(file, "order=-1 dimsize=-1");
+    }
 
     fprintf(file, " dtype=%s dense=%f\n", GetDataTypeName(dataType), denseRatio);
 
+    if(!isInit){
+        fprintf(file, "NULL");
+    }
+
     if (!isSparse) {
         if (dataType == DEFAULT_DTYPE) {
             if (unitNum > 0) {
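
The guarded Dump above now tolerates uninitialized tensors. A quick illustration of the two output shapes (the exact strings are inferred from the fprintf calls above, and the call assumes the trailing n/verbose parameters of Dump have default values):

    XTensor t;              /* fresh tensor: isInit == false */
    t.Dump(stderr, "t:");   /* -> t: order=-1 dimsize=-1 dtype=X_FLOAT dense=1.000000
                                   NULL */

    InitTensor2D(&t, 2, 2);
    t.Dump(stderr, "t:");   /* -> t: order=2 dimsize=2,2 dtype=X_FLOAT dense=1.000000
                                   followed by the data values */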
@@ -1813,7 +1818,7 @@ XTensor * NewTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_
                     const float myDenseRatio, const int myDevID, XMem * myMem)
 {
     if(myMem != NULL)
-        return new XTensor(myOrder, myDimSize, myDataType, myDenseRatio, myMem);
+        return new XTensor(myOrder, myDimSize, myDataType, myDenseRatio, myDevID, myMem);
     else{
         XTensor * tensor = new XTensor();
         InitTensor(tensor, myOrder, myDimSize, myDataType, myDenseRatio, myDevID, myMem);
@@ -1984,7 +1989,9 @@ XTensor * NewTensor(XTensor * a, bool isFilledData)
     if(!isFilledData)
         dims[0] = -dims[0];
 
-    XTensor * newTensor = new XTensor(a->order, dims, a->dataType, a->denseRatio, a->mem);
+    XTensor * newTensor = new XTensor(a->order, dims,
+                                      a->dataType, a->denseRatio,
+                                      a->devID, a->mem);
 
     delete[] dims;
...
@@ -167,7 +167,7 @@ public:
     /* constructor */
     XTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType,
-            const float myDenseRatio, XMem * myMem);
+            const float myDenseRatio, int myDevID, XMem * myMem);
 
     /* copy constructor */
     XTensor(const XTensor &reference);
@@ -175,6 +175,9 @@ public:
     /* destructor */
     ~XTensor();
 
+    /* initialize member variables */
+    void Init();
+
     /* delete data arrays */
     void DestroyData();
@@ -331,7 +334,7 @@ public:
 /* we make a unique id for every tensor */
 extern int tensorIDGlobal;
 extern MUTEX_HANDLE tensorMutex;
-extern XTensor firstTensor;
+extern XTensor NULLTensor;
 extern int MakeTensorID();
 
 /************************************************
...
@@ -90,8 +90,8 @@ void MatrixMulBatched(XTensor * a, MATRIX_TRANS_TYPE transposedA,
             void * bp = (char*)b->data + bRealBlockSize * p;
             void * cp = (char*)c->data + cRealBlockSize * p;
             XTensor * ai = NewTensor(2, aDimSize, a->dataType, a->denseRatio, a->devID, a->mem);
-            XTensor * bi = NewTensor(2, bDimSize, b->dataType, b->denseRatio, a->devID, b->mem);
-            XTensor * ci = NewTensor(2, cDimSize, c->dataType, c->denseRatio, a->devID, c->mem);
+            XTensor * bi = NewTensor(2, bDimSize, b->dataType, b->denseRatio, b->devID, b->mem);
+            XTensor * ci = NewTensor(2, cDimSize, c->dataType, c->denseRatio, c->devID, c->mem);
             ai->data = ap;
             bi->data = bp;
             ci->data = cp;
...
@@ -220,7 +220,9 @@ void Merge(XList * smalls, XTensor * big, int whereToMerge)
     dimSizeTMP[smallsItem0->order] = -mergeNum;
 
     XMem * mem = smallsItem0->mem;
-    XTensor * tensorTMP = new XTensor(smallsItem0->order + 1, dimSizeTMP, smallsItem0->dataType, smallsItem0->denseRatio, mem);
+    XTensor * tensorTMP = new XTensor(smallsItem0->order + 1, dimSizeTMP,
+                                      smallsItem0->dataType, smallsItem0->denseRatio,
+                                      smallsItem0->devID, mem);
     int size = mergeNum * itemSize;
 
     void * dataTMP = NULL;
...
@@ -197,7 +197,7 @@ void Split(XTensor * big, XList * smalls, int whereToSplit, int splitNum)
     dimSizeTMP[big->order] = -splitNum;
 
     XMem * mem = big->mem;
-    XTensor * tensorTMP = new XTensor(big->order + 1, dimSizeTMP, big->dataType, big->denseRatio, mem);
+    XTensor * tensorTMP = new XTensor(big->order + 1, dimSizeTMP, big->dataType, big->denseRatio, big->devID, mem);
     int size = big->unitNum * big->unitSize;
 
     void * dataTMP = NULL;
...
@@ -433,7 +433,7 @@ void CudaTopK(XTensor * a, XTensor * b, XTensor * index, int dim, int k)
     int dimSize[MAX_TENSOR_DIM_NUM];
     memcpy(dimSize, a->dimSize, sizeof(int) * a->order);
     dimSize[0] = -dimSize[0];
-    XTensor * indexA = new XTensor(a->order, dimSize, X_INT, 1.0F, a->mem);
+    XTensor * indexA = new XTensor(a->order, dimSize, X_INT, 1.0F, a->devID, a->mem);
     indexA->data = a->mem != NULL ? a->mem->AllocBuf(a->devID, a->unitNum * sizeof(int)) : XMemAlloc(a->devID, a->unitNum * sizeof(int));
 
     /* make the index tensor */
...
@@ -282,7 +282,7 @@ void CudaSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
     XTensor * ytmp = NewTensor(y, false);
 
     /* make a matrix to keep \beta */
-    XTensor * beta = new XTensor(y->order - 1, dimSize, y->dataType, y->denseRatio, mem);
+    XTensor * beta = new XTensor(y->order - 1, dimSize, y->dataType, y->denseRatio, y->devID, mem);
 
     ytmp->data = mem->AllocBuf(mem->devID, y->unitNum * y->unitSize);
     beta->data = mem->AllocBuf(mem->devID, beta->unitNum * beta->unitSize);
...