Emmay / NiuTrans.Tensor / Commits

Commit 9b11391e
authored Jul 12, 2018 by liyinqiao

1. Merge with xiao branch; 2. Update doc

parent 1410c491
Showing 18 changed files with 263 additions and 117 deletions (+263 −117)
doc/manual.md                                        +0   −0
source/network/Main.cpp                              +1   −1
source/network/XNet.cpp                              +27  −0
source/network/XNet.h                                +48  −0
source/tensor/Main.cpp                               +21  −13
source/tensor/XLink.cpp                              +65  −22
source/tensor/XLink.h                                +4   −0
source/tensor/XList.cpp                              +2   −0
source/tensor/XList.h                                +2   −0
source/tensor/XName.h                                +1   −1
source/tensor/XTensor.cpp                            +77  −70
source/tensor/XTensor.h                              +5   −2
source/tensor/core/arithmetic/MatrixMulBatched.cpp   +2   −2
source/tensor/core/arithmetic/SumByColumnVT.cu       +2   −2
source/tensor/core/shape/Merge.cpp                   +3   −1
source/tensor/core/shape/Split.cpp                   +1   −1
source/tensor/core/sort/TopK.cu                      +1   −1
source/tensor/function/Softmax.cu                    +1   −1
doc/manual.md — View file @ 9b11391e

(diff collapsed; contents not shown)
source/network/Main.cpp — View file @ 9b11391e

...
@@ -20,7 +20,7 @@
  */

 #include <stdio.h>
-#include "../tensor/XTensor.h"
+#include "XNet.h"

 //#define CRTDBG_MAP_ALLOC
 //#include <stdlib.h>
...
source/network/XNet.cpp — 0 → 100644 — View file @ 9b11391e

/* NiuTrans.Tensor - an open-source tensor library
 * Copyright (C) 2018, Natural Language Processing Lab, Northeastern University.
 * All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * $Created by: XIAO Tong (xiaotong@mail.neu.edu.cn) 2018-07-12
 */

#include "XNet.h"

namespace nts
{
}
\ No newline at end of file
source/network/XNet.h — 0 → 100644 — View file @ 9b11391e

/* NiuTrans.Tensor - an open-source tensor library
 * Copyright (C) 2018, Natural Language Processing Lab, Northeastern University.
 * All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * $Created by: XIAO Tong (xiaotong@mail.neu.edu.cn) 2018-07-12
 * We expected a heavy rain today but a drizzle came down. Should I
 * take a big umbrella?
 */

#include "../tensor/XTensor.h"
#include "../tensor/function/FHeader.h"

#ifndef __XNET_H__
#define __XNET_H__

namespace nts
{

/* management of tensor net (or graph) */
class XNet
{
public:
    /* backward propagation to obtain gradient wrt. the loss/error function */
    void Backward(XTensor &root, XTensor &gold = NULLTensor, LOSS_FUNCTION_NAME loss = NOLOSS);

    /* backward propagation to obtain gradient wrt. the loss/error function
       with a number of root nodes */
    void Backward(XList &roots, XList &golds = NULLList, LOSS_FUNCTION_NAME loss = NOLOSS);
};

}

#endif
\ No newline at end of file
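The two Backward overloads above are only declared in this commit (XNet.cpp still contains an empty namespace), so the following is a hypothetical usage sketch of the declared interface, not code from the repository; it assumes CROSSENTROPY is among the LOSS_FUNCTION_NAME values pulled in via FHeader.h:

    #include "XNet.h"

    using namespace nts;

    void TrainStep(XTensor &output, XTensor &gold)
    {
        XNet net;

        /* back-propagate from a single root node, comparing the
           network output against the gold-standard answer */
        net.Backward(output, gold, CROSSENTROPY);
    }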
source/tensor/Main.cpp — View file @ 9b11391e

...
@@ -42,11 +42,14 @@ void SmallTest();
 int main( int argc, const char ** argv )
 {
-    //_CrtSetBreakAlloc(78);
+    //_CrtSetBreakAlloc(123);

     /* a tiny test */
-    //if(1)
-    //    SmallTest();
+    if(false)
+        SmallTest();

     //_CrtDumpMemoryLeaks();
     //return 0;

     if(argc > 1 && !strcmp(argv[1], "-test"))
         Test();
...
@@ -68,25 +71,30 @@ void SmallTest()
 {
     XTensor a;
     XTensor b;
+    XTensor c;
+    XTensor d;

     InitTensor2D(&a, 2, 2);
     InitTensor2D(&b, 2, 2);
     a.SetZeroAll();
     b.SetZeroAll();
     a.Set2D(1.0F, 0, 0);
     a.Set2D(2.0F, 1, 1);

     b = Sum(a, Multiply(a, a));
-    XTensor c = a * b + a;
-    int nnn = 1;
-    XTensor d = a + b + c.Lin(0.5F);
+
+    /* this is prohibited !!!!!!!!!!!!! */
+    //XTensor c = a * b + a;
+    //XTensor d = a + b + c.Lin(0.5F);
+
+    c = a * b + a;
+    d = a + b + c.Lin(0.5F);

     XLink::CheckNetwork(&d);
-    XLink::ShowNetwork(stderr, &b);
+    XLink::ShowNetwork(stderr, &d);

-    a.Dump(stderr, "a: ");
-    b.Dump(stderr, "b: ");
-    c.Dump(stderr, "c: ");
-    d.Dump(stderr, "d: ");
+    a.Dump(stderr, "a:");
+    b.Dump(stderr, "b:");
+    c.Dump(stderr, "c:");
+    d.Dump(stderr, "d:");
 }
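A note on the "prohibited" pattern, inferred from this diff and the destructor change in XTensor.cpp further down (the commit itself does not explain it): operator expressions such as a * b + a create temporary tensors that are wired into the computation graph, so copy-initializing a brand-new XTensor from such an expression is disallowed; tensors are declared first and then assigned through operator=, which hard-copies the result. A minimal sketch of the sanctioned shape:

    XTensor a;
    XTensor c;                        /* declare first ... */
    InitTensor2D(&a, 2, 2);
    a.SetZeroAll();

    c = Sum(a, Multiply(a, a));       /* ... then assign into the existing tensor */
    //XTensor d = Sum(a, Multiply(a, a));   /* prohibited: copy-initialization
    //                                         from an operator expression */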
source/tensor/XLink.cpp — View file @ 9b11391e

...
@@ -42,8 +42,10 @@ XLink::XLink()
 /* deconstructor */
 XLink::~XLink()
 {
-    delete[] tails;
-    delete[] (char*)params;
+    if(tails != NULL)
+        delete[] tails;
+    if(params != NULL)
+        delete[] (char*)params;
 }

 /* reset it */
...
@@ -75,6 +77,39 @@ void XLink::ClearTail()
 }

+/*
+clear the outgoing node list of tensor node
+>> node - the node to be cleared
+*/
+void XLink::ClearOutgoing(XTensor * node)
+{
+    if(node == NULL)
+        return;
+
+    XLink &outgo = node->outgo;
+
+    for(int i = 0; i < outgo.tailNum; i++){
+        /* for each parent node */
+        XTensor * parent = outgo.tails[i];
+        XLink &parentIncome = parent->income;
+        CheckNTErrors(parentIncome.tailNum > 0, "The node must have incoming edges!");
+
+        /* we check for each parent node and remove the link to current node */
+        for(int j = 0; j < parentIncome.tailNum; j++){
+            if(parentIncome.tails[j] == node){
+                memcpy(parentIncome.tails + j, parentIncome.tails + j + 1,
+                       sizeof(XTensor*) * (parentIncome.tailNum - 1 - j));
+                parentIncome.tailNum--;
+                break;
+            }
+        }
+    }
+
+    outgo.ClearTail();
+}
+
 /*
 clear the incoming node list of tensor node
 >> node - the node to be cleared
 */
...
@@ -87,7 +122,7 @@ void XLink::ClearIncoming(XTensor * node)
     for(int i = 0; i < income.tailNum; i++){
-        /* for a incoming node */
+        /* for each incoming node */
         XTensor * child = income.tails[i];
         XLink &childOutgo = child->outgo;
...
@@ -96,9 +131,8 @@ void XLink::ClearIncoming(XTensor * node)
         /* we check for each child node and remove the link to current node */
         for(int j = 0; j < childOutgo.tailNum; j++){
             if(childOutgo.tails[j] == node){
-                memcpy(childOutgo.tails + j, childOutgo.tails + j + 1,
-                       (childOutgo.tailNum - 1 - j) * sizeof(XTensor*));
+                memcpy(childOutgo.tails + j, childOutgo.tails + j + 1,
+                       sizeof(XTensor*) * (childOutgo.tailNum - 1 - j));
                 childOutgo.tailNum--;
                 break;
             }
...
@@ -109,7 +143,6 @@ void XLink::ClearIncoming(XTensor * node)
     }

     income.ClearTail();
-    income.tailNum = 0;
 }

 /*
...
@@ -239,6 +272,7 @@ void XLink::MakeLink(XList * list, XTensor * h, int id)
         XLink &outgo = t->outgo;
         CheckNTErrors(outgo.head == NULL || outgo.head == t,
                       "Wrong head of the hyperedge!");
+        outgo.SetHead(t);
         outgo.AddTail(h);
     }
 }
...
@@ -276,17 +310,22 @@ void XLink::Replace(const XTensor * oldOne, XTensor * newOne)
 {
     if(oldOne == NULL || newOne == NULL)
         return;

-    XLink &newIncome = newOne->income;
-    XLink &newOutgo = newOne->outgo;
+    XLink::ClearOutgoing(newOne);
+    XLink::ClearIncoming(newOne);
+
+    XLink &newIncome = newOne->income;
+    XLink &newOutgo = newOne->outgo;

-    delete[] newIncome.tails;
+    if(newIncome.tailNum < oldOne->income.tailNum){
+        delete[] newIncome.tails;
+        newIncome.tails = new XTensor*[oldOne->income.tailNum];
+    }

-    /* incoming nodes for the new node */
+    /* incoming nodes */
     newIncome.SetType(oldOne->income.typeID);
     newIncome.head = newOne;
     newIncome.tailNum = oldOne->income.tailNum;
-    newIncome.tails = new XTensor*[newIncome.tailNum];
     memcpy(newIncome.tails, oldOne->income.tails, sizeof(XTensor*) * newIncome.tailNum);

     /* update the link to each child node */
...
@@ -306,11 +345,15 @@ void XLink::Replace(const XTensor * oldOne, XTensor * newOne)
         CheckNTErrors(hit, "No proper node found in child.outgo edge!");
     }

+    if(newOutgo.tailNum < oldOne->outgo.tailNum){
+        delete[] newOutgo.tails;
+        newOutgo.tails = new XTensor*[oldOne->outgo.tailNum];
+    }

-    /* outgoing nodes for the new node */
+    /* outgoing nodes */
     newOutgo.head = newOne;
     newOutgo.tailNum = oldOne->outgo.tailNum;
-    newOutgo.tails = new XTensor*[newOutgo.tailNum];
     memcpy(newOutgo.tails, oldOne->outgo.tails, sizeof(XTensor*) * newOutgo.tailNum);

     /* update the link to each parent node */
...
@@ -385,7 +428,6 @@ void XLink::CheckNetwork(XTensor * root)
             }
-            CheckNTErrors(hit, "Wrong outgoing edge!");
         }
     }

     XLink &outgo = root->outgo;
...
@@ -397,15 +439,15 @@ void XLink::CheckNetwork(XTensor * root)
         XTensor * parent = outgo.tails[i];
         if(parent == NULL)
             continue;
-        XLink &parentOutgo = parent->outgo;
+        XLink &parentIncome = parent->income;
         bool hit = false;
-        for(int j = 0; j < parentOutgo.tailNum; j++){
-            if(parentOutgo.tails[j] == root){
+        for(int j = 0; j < parentIncome.tailNum; j++){
+            if(parentIncome.tails[j] == root){
                 hit = true;
                 break;
             }
         }
-        CheckNTErrors(hit, "Wrong outgoing edge!");
+        CheckNTErrors(hit, "Wrong incoming edge!");
     }
 }
...
@@ -429,7 +471,7 @@ void XLink::ShowNetwork(FILE * file, XTensor * root)
         fprintf(file, "income[%d]: null ", income.tailNum);
     }
     else{
-        fprintf(file, "income[%d]: ", income.tailNum);
+        fprintf(file, "income[%d, %s]: ", income.tailNum, GetOPName(income.typeID));
         for(int i = 0; i < income.tailNum; i++){
             XTensor * child = income.tails[i];
             if(child == NULL)
...
@@ -438,13 +480,14 @@ void XLink::ShowNetwork(FILE * file, XTensor * root)
             fprintf(file, "%d ", child->id);
         }
     }
     fprintf(stderr, ", ");

     XLink &outgo = root->outgo;
-    if(outgo.head == NULL){
+    if(outgo.head == NULL || outgo.tailNum == 0){
         fprintf(file, "outgo[%d]: null ", outgo.tailNum);
     }
     else{
-        fprintf(file, "outgo[%d]: ", income.tailNum);
+        fprintf(file, "outgo[%d]: ", outgo.tailNum);
         for(int i = 0; i < outgo.tailNum; i++){
             XTensor * parent = outgo.tails[i];
             if(parent == NULL)
...
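The link-removal loops in ClearOutgoing and ClearIncoming share one idiom: an entry is deleted from the tails pointer array by sliding the following entries one slot left and decrementing tailNum. Note that the source and destination ranges overlap, so strictly portable code would use memmove rather than memcpy (memcpy on overlapping regions is formally undefined, though it typically works for a leftward shift). A standalone sketch of the idiom, with hypothetical names rather than code from the repository:

    #include <cstring>

    /* remove items[j] from an array of n pointers by shifting the tail left */
    template <typename T>
    void RemoveAt(T ** items, int &n, int j)
    {
        /* slide the (n - 1 - j) pointers after position j one slot to the left;
           memmove is safe for overlapping ranges */
        memmove(items + j, items + j + 1, sizeof(T*) * (n - 1 - j));
        n--;
    }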
source/tensor/XLink.h — View file @ 9b11391e

...
@@ -95,6 +95,10 @@ struct XLink
     /* clear the incoming node list of tensor node */
     static
     void ClearIncoming(XTensor * node);

+    /* clear the outgoing node list of tensor node */
+    static
+    void ClearOutgoing(XTensor * node);
+
     /* set edge type id and name */
     void SetType(int id);
...
source/tensor/XList.cpp — View file @ 9b11391e

...
@@ -42,6 +42,8 @@
 /* the nts (NiuTrans.Tensor) namespace */
 namespace nts{

+XList NULLList;
+
 /* constructor */
 XList::XList()
 {
...
source/tensor/XList.h — View file @ 9b11391e

...
@@ -96,6 +96,8 @@ public:
 };

+extern XList NULLList;
+
 }
 /* end of the nts (NiuTrans.Tensor) namespace */
...
source/tensor/XName.h — View file @ 9b11391e

...
@@ -28,7 +28,7 @@
 namespace nts { // namespace nts(NiuTrans.Tensor)

-#define MATH_ARITHMETIC 10000
+#define MATH_ARITHMETIC 0x00001000
 #define MATH_SUM MATH_ARITHMETIC + 1
 #define MATH_MULTIPLY MATH_SUM + 1
 #define MATH_SCALEANDSHIFT MATH_MULTIPLY + 1
...
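One caveat about this macro scheme, an observation rather than part of the commit: the derived IDs expand to unparenthesized arithmetic, which can misfire inside larger expressions. A small illustration:

    #define MATH_ARITHMETIC 0x00001000
    #define MATH_SUM MATH_ARITHMETIC + 1      /* expands to 0x00001000 + 1 */

    /* 2 * MATH_SUM expands to 2 * 0x00001000 + 1, i.e. 0x2001, not 0x2002;
       defining MATH_SUM as (MATH_ARITHMETIC + 1) would avoid the surprise */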
source/tensor/XTensor.cpp — View file @ 9b11391e

...
@@ -61,7 +61,7 @@ namespace nts{
 int tensorIDGlobal = 0;
 MUTEX_HANDLE tensorMutex;
-XTensor firstTensor;
+XTensor NULLTensor;

 /* generate a tensor id */
 int MakeTensorID()
...
@@ -83,24 +83,10 @@ constructor
 */
 XTensor::XTensor()
 {
-    memset(this, 0, sizeof(XTensor));
+    Init();
     SetDataPointer();

     id = MakeTensorID();
-    order = -1;
-    memset(dimSize, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
-    memset(dimSizeRDI, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
-    memset(isAllValued, 0, sizeof(bool) * MAX_TENSOR_DIM_NUM);
-    dataType = DEFAULT_DTYPE;
-    devID = -1;
-    unitSize = sizeof(float);
-    unitNum = 0;
-    unitNumNonZero = 0;
-    isSparse = false;
-    isShared = false;
-    denseRatio = 1.0F;
-    isDefaultDType = true;
-    isInGlobalMem = false;
     isInit = false;
...
@@ -110,16 +96,9 @@ XTensor::XTensor()
 /* constructor */
 XTensor::XTensor(const XTensor * reference)
 {
-    memset(this, 0, sizeof(XTensor));
+    Init();
     SetDataPointer();
     id = MakeTensorID();
-    dataType = DEFAULT_DTYPE;
-    devID = -1;
-    denseRatio = 1.0F;
-    isDefaultDType = true;
-    isInit = false;
-    isTmp = false;

     InitTensor(this, reference);
 }
...
@@ -127,36 +106,20 @@ XTensor::XTensor(const XTensor * reference)
 /*
 constructor
 >> myOrder - order of the tensor
->> myDevID - prefered device id
+>> myDevID - device id
 >> myMem - memory pool used to allocating the data array
 */
 XTensor::XTensor(const int myOrder, int myDevID, XMem * myMem)
 {
     CheckNTErrors((myOrder > 0), "Illegal tensor order1");

+    Init();
     SetDataPointer();
     id = MakeTensorID();
     order = myOrder;
-    memset(dimSize, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
-    memset(dimSizeRDI, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
-    memset(isAllValued, 0, sizeof(bool) * MAX_TENSOR_DIM_NUM);
     mem = myMem;
-    data = NULL;
-    dataHost = NULL;
-    dataType = DEFAULT_DTYPE;
     devID = myMem == NULL ? myDevID : myMem->devID;
-    unitSize = sizeof(float);
-    unitNum = 0;
-    unitNumNonZero = 0;
-    isSparse = false;
-    isShared = false;
-    denseRatio = 1.0F;
-    isDefaultDType = true;
-    isInGlobalMem = false;
-    isInit = false;
-    isTmp = false;
 }

 /*
...
@@ -165,31 +128,21 @@ constructor
 >> myDimSize - the size of each dimension
 >> myDataType - unit size (e.g., int, float, and double)
 >> myDenseRatio - how often an element has non-zero value
+>> myDevID - device id
 >> myMem - memory pool used to allocating the data array
 */
 XTensor::XTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType,
-                 const float myDenseRatio, XMem * myMem)
+                 const float myDenseRatio, int myDevID, XMem * myMem)
 {
     CheckNTErrors((myOrder > 0), "Illegal tensor order1");

+    Init();
     SetDataPointer();
     id = MakeTensorID();
     order = myOrder;
-    memset(dimSize, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
-    memset(dimSizeRDI, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
-    memset(isAllValued, 0, sizeof(bool) * MAX_TENSOR_DIM_NUM);
     mem = myMem;
-    data = NULL;
-    dataHost = NULL;
-    dataType = DEFAULT_DTYPE;
-    devID = myMem == NULL ? -1 : myMem->devID;
-    isShared = false;
-    isDefaultDType = true;
-    isInGlobalMem = false;
-    isInit = false;
-    isTmp = false;
+    devID = myMem != NULL ? myMem->devID : myDevID;

     Resize(myOrder, myDimSize, myDataType, myDenseRatio);
 }
...
@@ -197,6 +150,7 @@ XTensor::XTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYP
 /* copy constructor */
 XTensor::XTensor(const XTensor &reference)
 {
+    Init();
     SetDataPointer();
     id = MakeTensorID();
     ShallowCopy(reference);
...
@@ -229,18 +183,60 @@ XTensor::XTensor(const XTensor &reference)
         XLink::CopyIncoming(&reference, this);
     }

-    isInit = false;
-    isTmp = false;
+    isInit = true;
+    isTmp = reference.isTmp;
 }

 /* de-constructor */
 XTensor::~XTensor()
 {
+    /* We make a hard copy of the tensor to keep
+       the connectivity of the graph. To kill memory
+       leak, we release the data of the new tensor
+       when its parent is deleted (see ClearIncoming). */
+    if(isTmp && outgo.tailNum > 0){
+        int dims[MAX_TENSOR_DIM_NUM];
+        memcpy(dims, dimSize, order * sizeof(int));
+        dims[0] = -dims[0];
+
+        XTensor * newTensor = new XTensor(order, dims, dataType, denseRatio, devID, mem);
+        newTensor->SetTMP();
+        newTensor->data = data;
+        data = NULL;
+
+        XLink::Replace(this, newTensor);
+    }
+
+    XLink::ClearOutgoing(this);
     XLink::ClearIncoming(this);
     DestroyData();
 }

+/* initialize member variables */
+void XTensor::Init()
+{
+    id = -1;
+    mem = NULL;
+    data = NULL;
+    dataHost = NULL;
+    dataP = NULL;
+    devID = -1;
+    order = -1;
+    memset(dimSize, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
+    memset(dimSizeRDI, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
+    dataType = DEFAULT_DTYPE;
+    unitSize = sizeof(float);
+    unitNum = 0;
+    isSparse = false;
+    unitNumNonZero = 0;
+    denseRatio = 1.0F;
+    isShared = false;
+    isDefaultDType = true;
+    isInGlobalMem = false;
+    memset(isAllValued, 0, sizeof(bool) * MAX_TENSOR_DIM_NUM);
+    isInit = false;
+    isTmp = false;
+}
+
 /* delete data arrays */
...
@@ -284,7 +280,7 @@ void XTensor::ShallowCopy(const XTensor &tensor)
 /* overloading of the equal-sign */
 XTensor& XTensor::operator= (const XTensor &tensor)
 {
-    /* hard copy of data array */
+    /* hard copy of the data array */
     int size = unitNum * unitSize;
     if(isInit && !isSparse && !tensor.isSparse &&
        size == tensor.unitNum * tensor.unitSize &&
...
@@ -1345,16 +1341,25 @@ void XTensor::Dump(FILE * file, const char * label, const int n, const int verbo
     if(label != NULL)
         fprintf(file, "%s ", label);

-    fprintf(file, "order=%d dimsize=", order);
-    for(int i = 0; i < order; i++){
-        fprintf(file, "%d", dimSize[i]);
-        if(i < order - 1)
-            fprintf(file, ",");
+    if(isInit){
+        fprintf(file, "order=%d dimsize=", order);
+        for(int i = 0; i < order; i++){
+            fprintf(file, "%d", dimSize[i]);
+            if(i < order - 1)
+                fprintf(file, ",");
+        }
+    }
+    else{
+        fprintf(file, "order=-1 dimsize=-1");
     }

     fprintf(file, " dtype=%s dense=%f\n", GetDataTypeName(dataType), denseRatio);

+    if(!isInit){
+        fprintf(file, "NULL");
+    }
+
     if(!isSparse){
         if(dataType == DEFAULT_DTYPE){
             if(unitNum > 0){
...
@@ -1813,7 +1818,7 @@ XTensor * NewTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_
                     const float myDenseRatio, const int myDevID, XMem * myMem)
 {
     if(myMem != NULL)
-        return new XTensor(myOrder, myDimSize, myDataType, myDenseRatio, myMem);
+        return new XTensor(myOrder, myDimSize, myDataType, myDenseRatio, myDevID, myMem);
     else{
         XTensor * tensor = new XTensor();
         InitTensor(tensor, myOrder, myDimSize, myDataType, myDenseRatio, myDevID, myMem);
...
@@ -1984,7 +1989,9 @@ XTensor * NewTensor(XTensor * a, bool isFilledData)
     if(!isFilledData)
         dims[0] = -dims[0];

-    XTensor * newTensor = new XTensor(a->order, dims, a->dataType, a->denseRatio, a->mem);
+    XTensor * newTensor = new XTensor(a->order, dims, a->dataType, a->denseRatio,
+                                      a->devID, a->mem);

     delete[] dims;
...
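The remaining files all pick up the same mechanical change: every direct XTensor construction now passes an explicit device id just before the memory pool. A minimal sketch of the updated call shape (hypothetical values; per the constructor above, a devID of -1 with no memory pool means host memory):

    int dims[2] = {2, 3};

    /* a dense 2 x 3 float tensor kept in host memory */
    XTensor * t = new XTensor(2, dims, X_FLOAT, 1.0F, -1, NULL);

    /* the same via the factory function, placed on device 0 (e.g. GPU 0) */
    XTensor * u = NewTensor(2, dims, X_FLOAT, 1.0F, 0, NULL);

    delete t;
    delete u;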
source/tensor/XTensor.h — View file @ 9b11391e

...
@@ -167,7 +167,7 @@ public:
     /* constructor */
     XTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType,
-            const float myDenseRatio, XMem * myMem);
+            const float myDenseRatio, int myDevID, XMem * myMem);

     /* copy constructor */
     XTensor(const XTensor &reference);
...
@@ -175,6 +175,9 @@ public:
     /* de-constructor */
     ~XTensor();

+    /* initialize member variables */
+    void Init();
+
     /* delete data arrays */
     void DestroyData();
...
@@ -331,7 +334,7 @@ public:
 /* we make a unique id for every tensor */
 extern int tensorIDGlobal;
 extern MUTEX_HANDLE tensorMutex;
-extern XTensor firstTensor;
+extern XTensor NULLTensor;
 extern int MakeTensorID();

 /************************************************
...
source/tensor/core/arithmetic/MatrixMulBatched.cpp — View file @ 9b11391e

...
@@ -90,8 +90,8 @@ void MatrixMulBatched(XTensor * a, MATRIX_TRANS_TYPE transposedA,
             void * bp = (char*)b->data + bRealBlockSize * p;
             void * cp = (char*)c->data + cRealBlockSize * p;
             XTensor * ai = NewTensor(2, aDimSize, a->dataType, a->denseRatio, a->devID, a->mem);
-            XTensor * bi = NewTensor(2, bDimSize, b->dataType, b->denseRatio, a->devID, b->mem);
-            XTensor * ci = NewTensor(2, cDimSize, c->dataType, c->denseRatio, a->devID, c->mem);
+            XTensor * bi = NewTensor(2, bDimSize, b->dataType, b->denseRatio, b->devID, b->mem);
+            XTensor * ci = NewTensor(2, cDimSize, c->dataType, c->denseRatio, c->devID, c->mem);
             ai->data = ap;
             bi->data = bp;
             ci->data = cp;
...
source/tensor/core/arithmetic/SumByColumnVT.cu — View file @ 9b11391e

...
@@ -91,7 +91,7 @@ void CudaSumByColumnVT(XTensor * a, XTensor * b, XTensor * c, DTYPE beta)
                   "Illegal input vector size!");
     CheckNTErrors((a->dataType == DEFAULT_DTYPE && b->dataType == DEFAULT_DTYPE &&
                    c->dataType == DEFAULT_DTYPE), "TODO");

     int rowNum = b->dimSize[0];
     int colNum = b->dimSize[1];
     int blockNum = 1;
...
@@ -105,7 +105,7 @@ void CudaSumByColumnVT(XTensor * a, XTensor * b, XTensor * c, DTYPE beta)
     int devIDBackup = 0;
     ProtectCudaDev(a->devID, devIDBackup);

     KernelADDByColumnVT <<<dim3(cudaGridSize[0]), dim3(cudaBlockSize[0])>>>
                         ((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, colNum, rowNum, blockNum, beta);
...
source/tensor/core/shape/Merge.cpp — View file @ 9b11391e

...
@@ -220,7 +220,9 @@ void Merge(XList * smalls, XTensor * big, int whereToMerge)
     dimSizeTMP[smallsItem0->order] = -mergeNum;

     XMem * mem = smallsItem0->mem;
-    XTensor * tensorTMP = new XTensor(smallsItem0->order + 1, dimSizeTMP, smallsItem0->dataType, smallsItem0->denseRatio, mem);
+    XTensor * tensorTMP = new XTensor(smallsItem0->order + 1, dimSizeTMP,
+                                      smallsItem0->dataType, smallsItem0->denseRatio,
+                                      smallsItem0->devID, mem);
     int size = mergeNum * itemSize;

     void * dataTMP = NULL;
...
source/tensor/core/shape/Split.cpp — View file @ 9b11391e

...
@@ -197,7 +197,7 @@ void Split(XTensor * big, XList * smalls, int whereToSplit, int splitNum)
     dimSizeTMP[big->order] = -splitNum;

     XMem * mem = big->mem;
-    XTensor * tensorTMP = new XTensor(big->order + 1, dimSizeTMP, big->dataType, big->denseRatio, mem);
+    XTensor * tensorTMP = new XTensor(big->order + 1, dimSizeTMP, big->dataType, big->denseRatio, big->devID, mem);
     int size = big->unitNum * big->unitSize;

     void * dataTMP = NULL;
...
source/tensor/core/sort/TopK.cu — View file @ 9b11391e

...
@@ -433,7 +433,7 @@ void CudaTopK(XTensor * a, XTensor * b, XTensor * index, int dim, int k)
     int dimSize[MAX_TENSOR_DIM_NUM];
     memcpy(dimSize, a->dimSize, sizeof(int) * a->order);
     dimSize[0] = -dimSize[0];
-    XTensor * indexA = new XTensor(a->order, dimSize, X_INT, 1.0F, a->mem);
+    XTensor * indexA = new XTensor(a->order, dimSize, X_INT, 1.0F, a->devID, a->mem);
     indexA->data = a->mem != NULL ? a->mem->AllocBuf(a->devID, a->unitNum * sizeof(int)) : XMemAlloc(a->devID, a->unitNum * sizeof(int));

     /* make the index tensor */
...
source/tensor/function/Softmax.cu — View file @ 9b11391e

...
@@ -282,7 +282,7 @@ void CudaSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
     XTensor * ytmp = NewTensor(y, false);

     /* make a matrix to keep \beta */
-    XTensor * beta = new XTensor(y->order - 1, dimSize, y->dataType, y->denseRatio, mem);
+    XTensor * beta = new XTensor(y->order - 1, dimSize, y->dataType, y->denseRatio, y->devID, mem);

     ytmp->data = mem->AllocBuf(mem->devID, y->unitNum * y->unitSize);
     beta->data = mem->AllocBuf(mem->devID, beta->unitNum * beta->unitSize);
...