Commit 823abb4f authored Oct 30, 2019 by liyinqiao
Swap the name of old and v2 interfaces in XCall.*.
parent f5149a15
Showing 118 changed files with 754 additions and 754 deletions.
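The direction of the swap can be read from the call sites in the diffs below: calls that were written against the old interfaces pick up the V2 suffix, while calls that already used the V2 interfaces drop it. A minimal sketch of the post-commit call-site style, assuming the NiuTrans.Tensor headers and the nts namespace; the helper function, tensor sizes and include paths are illustrative, not taken from this commit:

#include "XTensor.h"   // assumption: NiuTrans.Tensor public headers on the include path
#include "XCall.h"

using namespace nts;

/* illustrative helper, not part of the repository */
void InitAfterSwap(int devID)
{
    XTensor w;

    /* the lean initializer keeps the plain name (as in T2TAttention.cpp below) */
    InitTensor2D(&w, 512, 512, X_FLOAT, devID);

    /* the pool-based buffer helper now carries the V2 suffix (as in XBackwardShape.cpp below) */
    XTensor * tmp = NewTensorBufV2(&w, w.devID, w.mem);
    _CopyValues(&w, tmp);
    DelTensorBuf(tmp);
}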
source/network/Main.cpp  +8 -8
source/network/XBackwardMath.cpp  +39 -39
source/network/XBackwardShape.cpp  +4 -4
source/sample/fnnlm/FNNLM.cpp  +18 -18
source/sample/transformer/T2TAttention.cpp  +8 -8
source/sample/transformer/T2TBatchLoader.cpp  +15 -15
source/sample/transformer/T2TEmbedding.cpp  +4 -4
source/sample/transformer/T2TFNN.cpp  +4 -4
source/sample/transformer/T2TLayerNormal.cpp  +2 -2
source/sample/transformer/T2TModel.cpp  +17 -17
source/sample/transformer/T2TOutput.cpp  +1 -1
source/sample/transformer/T2TPredictor.cpp  +8 -8
source/sample/transformer/T2TSearch.cpp  +15 -15
source/sample/transformer/T2TTrainer.cpp  +7 -7
source/tensor/XCall.cpp  +0 -0
source/tensor/XCall.h  +0 -0
source/tensor/XTensor.cpp  +2 -2
source/tensor/core/arithmetic/Div.cpp  +1 -1
source/tensor/core/arithmetic/DivDim.cpp  +1 -1
source/tensor/core/arithmetic/Mask.cpp  +1 -1
source/tensor/core/arithmetic/MatrixMul.cpp  +7 -7
source/tensor/core/arithmetic/MatrixMulBatched.cpp  +3 -3
source/tensor/core/arithmetic/MulAndShift.cpp  +2 -2
source/tensor/core/arithmetic/Multiply.cpp  +1 -1
source/tensor/core/arithmetic/MultiplyDim.cpp  +4 -4
source/tensor/core/arithmetic/Sub.cpp  +1 -1
source/tensor/core/arithmetic/SubDim.cpp  +1 -1
source/tensor/core/arithmetic/Sum.cpp  +1 -1
source/tensor/core/arithmetic/SumDim.cpp  +4 -4
source/tensor/core/getandset/ConvertDataType.cpp  +1 -1
source/tensor/core/getandset/OnehotAndIndex.cpp  +3 -3
source/tensor/core/getandset/SetData.cpp  +2 -2
source/tensor/core/math/Binary.cpp  +1 -1
source/tensor/core/math/Clip.cpp  +1 -1
source/tensor/core/math/Compare.cpp  +1 -1
source/tensor/core/math/Normalize.cpp  +1 -1
source/tensor/core/math/ScaleAndShift.cpp  +1 -1
source/tensor/core/math/Unary.cpp  +1 -1
source/tensor/core/reduce/ReduceMean.cpp  +1 -1
source/tensor/core/reduce/ReduceSum.cpp  +2 -2
source/tensor/core/reduce/ReduceSumAll.cpp  +3 -3
source/tensor/core/reduce/ReduceSumSquared.cpp  +1 -1
source/tensor/core/reduce/ReduceVariance.cpp  +1 -1
source/tensor/core/shape/Concatenate.cpp  +2 -2
source/tensor/core/shape/Merge.cpp  +1 -1
source/tensor/core/shape/Reshape.cpp  +1 -1
source/tensor/core/shape/Split.cpp  +1 -1
source/tensor/core/shape/Squeeze.cpp  +1 -1
source/tensor/core/shape/Unsqueeze.cpp  +1 -1
source/tensor/function/Dropout.cpp  +7 -7
source/tensor/function/DropoutWithIndex.cpp  +1 -1
source/tensor/function/HardTanH.cpp  +1 -1
source/tensor/function/Identity.cpp  +1 -1
source/tensor/function/LogSoftmax.cpp  +11 -11
source/tensor/function/Loss.cu  +7 -7
source/tensor/function/Rectify.cpp  +1 -1
source/tensor/function/Sigmoid.cpp  +1 -1
source/tensor/function/Softmax.cpp  +3 -3
source/tensor/function/Softmax.cu  +1 -1
source/tensor/loss/CrossEntropy.cpp  +2 -2
source/tensor/loss/CrossEntropy.cu  +4 -4
source/tensor/test/TAbsolute.cpp  +6 -6
source/tensor/test/TClip.cpp  +6 -6
source/tensor/test/TCompare.cpp  +6 -6
source/tensor/test/TConcatenate.cpp  +24 -24
source/tensor/test/TConcatenateSolely.cpp  +18 -18
source/tensor/test/TConvertDataType.cpp  +17 -17
source/tensor/test/TCopyIndexed.cpp  +50 -50
source/tensor/test/TCopyValues.cpp  +4 -4
source/tensor/test/TCos.cpp  +6 -6
source/tensor/test/TCrossEntropy.cpp  +22 -22
source/tensor/test/TDiv.cpp  +8 -8
source/tensor/test/TDivDim.cpp  +16 -16
source/tensor/test/TDropout.cpp  +12 -12
source/tensor/test/TExp.cpp  +6 -6
source/tensor/test/TGather.cpp  +6 -6
source/tensor/test/THardTanH.cpp  +12 -12
source/tensor/test/TIdentity.cpp  +12 -12
source/tensor/test/TLog.cpp  +6 -6
source/tensor/test/TLogSoftmax.cpp  +24 -24
source/tensor/test/TLoss.cpp  +12 -12
source/tensor/test/TMatrixMul.cpp  +24 -24
source/tensor/test/TMatrixMul2D.cpp  +12 -12
source/tensor/test/TMatrixMul2DParallel.cpp  +6 -6
source/tensor/test/TMatrixMulBatched.cpp  +12 -12
source/tensor/test/TMerge.cpp  +22 -22
source/tensor/test/TMultiply.cpp  +8 -8
source/tensor/test/TMultiplyDim.cpp  +16 -16
source/tensor/test/TNegate.cpp  +12 -12
source/tensor/test/TNormalize.cpp  +14 -14
source/tensor/test/TPower.cpp  +18 -18
source/tensor/test/TRectify.cpp  +12 -12
source/tensor/test/TReduceMax.cpp  +6 -6
source/tensor/test/TReduceMean.cpp  +6 -6
source/tensor/test/TReduceSum.cpp  +35 -35
source/tensor/test/TReduceSumAll.cpp  +2 -2
source/tensor/test/TReduceSumSquared.cpp  +12 -12
source/tensor/test/TReduceVariance.cpp  +6 -6
source/tensor/test/TRound.cpp  +6 -6
source/tensor/test/TScaleAndShift.cpp  +6 -6
source/tensor/test/TSelect.cpp  +0 -0
source/tensor/test/TSetAscendingOrder.cpp  +0 -0
source/tensor/test/TSetData.cpp  +0 -0
source/tensor/test/TSigmoid.cpp  +0 -0
source/tensor/test/TSign.cpp  +0 -0
source/tensor/test/TSin.cpp  +0 -0
source/tensor/test/TSoftmax.cpp  +0 -0
source/tensor/test/TSort.cpp  +0 -0
source/tensor/test/TSplit.cpp  +0 -0
source/tensor/test/TSpread.cpp  +0 -0
source/tensor/test/TSub.cpp  +0 -0
source/tensor/test/TSubDim.cpp  +0 -0
source/tensor/test/TSum.cpp  +0 -0
source/tensor/test/TSumDim.cpp  +0 -0
source/tensor/test/TTan.cpp  +0 -0
source/tensor/test/TTopK.cpp  +0 -0
source/tensor/test/TTranspose.cpp  +0 -0
source/tensor/test/TUnsqueeze.cpp  +0 -0
source/network/Main.cpp

@@ -76,8 +76,8 @@ void BackwardTest()
     c.enableGrad = false;
     XTensor mean;
     XTensor origin;
-    InitTensor2D(&a, 2, 3);
-    InitTensor1D(&b, 2);
+    InitTensor2DV2(&a, 2, 3);
+    InitTensor1DV2(&b, 2);
     a.SetZeroAll();
     b.SetZeroAll();
@@ -121,9 +121,9 @@ void TransposeTest()
     int nnn = GDevs.nGPU;
-    InitTensor3D(&x, B, N, H, X_FLOAT, 0);
-    InitTensor4D(&y, K, B, N, H / K, X_FLOAT, 0);
-    InitTensor3D(&z, B, N, H, X_FLOAT, 0);
+    InitTensor3DV2(&x, B, N, H, X_FLOAT, 0);
+    InitTensor4DV2(&y, K, B, N, H / K, X_FLOAT, 0);
+    InitTensor3DV2(&z, B, N, H, X_FLOAT, 0);
     cudaEvent_t ctime0;
     cudaEvent_t ctime1;
@@ -191,9 +191,9 @@ void SumDimTest()
     int b = 7;
     int c = 3;
-    InitTensor3D(&x, a, b, c, X_FLOAT, -1);
-    InitTensor1D(&y, c, X_FLOAT, -1);
-    InitTensor3D(&z, a, b, c, X_FLOAT, -1);
+    InitTensor3DV2(&x, a, b, c, X_FLOAT, -1);
+    InitTensor1DV2(&y, c, X_FLOAT, -1);
+    InitTensor3DV2(&z, a, b, c, X_FLOAT, -1);
     x.SetZeroAll();
     y.SetZeroAll();
source/network/XBackwardMath.cpp

(Diff collapsed.)
source/network/XBackwardShape.cpp

@@ -391,7 +391,7 @@ void XShapeGrad::GradSplit(XTensor * node, bool isEfficient)
     /* if the tensor is used somewhere else, we need another SUM
        for gradient accumulation */
     else{
-        XTensor * inputGradTMP = NewTensorBuf(input, input->devID, input->mem);
+        XTensor * inputGradTMP = NewTensorBufV2(input, input->devID, input->mem);
         _Merge(node->grad, inputGradTMP, whereToSplit + 1, 0);
         _Sum(input->grad, inputGradTMP, input->grad);
@@ -475,7 +475,7 @@ void XShapeGrad::GradSplitListPost(XTensor * node, bool isEfficient)
        somewhere else, we need another SUM for gradient
        accumulation */
     else{
-        XTensor * nodeGradTMP = NewTensorBuf(node, node->devID, node->mem);
+        XTensor * nodeGradTMP = NewTensorBufV2(node, node->devID, node->mem);
         _Merge(&splits, nodeGradTMP, whereToSplit + 1);
         _Sum(node->grad, nodeGradTMP, node->grad);
@@ -501,7 +501,7 @@ void XShapeGrad::GradTranspose(XTensor * node, bool isEfficient)
     XTensor * output = node;
     XTensor * input = income.tails[0];
-    XTensor * b = NewTensorBuf(input, input->devID, input->mem);
+    XTensor * b = NewTensorBufV2(input, input->devID, input->mem);
     XNoder::MakeGrad(input);
     int i = income.GetParamInt(0);
@@ -543,7 +543,7 @@ void XShapeGrad::GradUnsqueeze(XTensor * node, bool isEfficient)
     CheckNTErrors(dSize == output->GetDim(dim), "Wrong dim size for UNSQUEEZE!");
     CheckNTErrors(output->unitNum = input->unitNum * dSize, "Wrong tensor size!");
-    XTensor * g = NewTensorBuf(input->grad, input->devID, input->mem);
+    XTensor * g = NewTensorBufV2(input->grad, input->devID, input->mem);
     _ReduceSum(output->grad, g, dim);
     _Sum(input->grad, g, input->grad);
source/sample/fnnlm/FNNLM.cpp

@@ -242,13 +242,13 @@ void Check(FNNModel &model)
 /* make a hard copy of the fnn model */
 void Copy(FNNModel &tgt, FNNModel &src)
 {
-    InitTensorV2(&tgt.embeddingW, &src.embeddingW);
+    InitTensor(&tgt.embeddingW, &src.embeddingW);
     for(int i = 0; i < MAX_HIDDEN_NUM; i++){
-        InitTensorV2(&tgt.hiddenW[i], &src.hiddenW[i]);
-        InitTensorV2(&tgt.hiddenB[i], &src.hiddenB[i]);
+        InitTensor(&tgt.hiddenW[i], &src.hiddenW[i]);
+        InitTensor(&tgt.hiddenB[i], &src.hiddenB[i]);
     }
-    InitTensorV2(&tgt.outputW, &src.outputW);
-    InitTensorV2(&tgt.outputB, &src.outputB);
+    InitTensor(&tgt.outputW, &src.outputW);
+    InitTensor(&tgt.outputB, &src.outputB);
     tgt.n = src.n;
     tgt.eSize = src.eSize;
@@ -300,7 +300,7 @@ initialize a 1d tensor using the fnn model setting
 */
 void InitModelTensor1D(XTensor &tensor, int num, FNNModel &model)
 {
-    InitTensor1DV2(&tensor, num, X_FLOAT, model.devID);
+    InitTensor1D(&tensor, num, X_FLOAT, model.devID);
 }
 /*
@@ -312,7 +312,7 @@ initialize a 2d tensor using the fnn model setting
 */
 void InitModelTensor2D(XTensor &tensor, int rowNum, int colNum, FNNModel &model)
 {
-    InitTensor2DV2(&tensor, rowNum, colNum, X_FLOAT, model.devID);
+    InitTensor2D(&tensor, rowNum, colNum, X_FLOAT, model.devID);
 }
@@ -594,14 +594,14 @@ get prediction probabilites of the gold words
 float GetProb(XTensor &output, XTensor &gold, XTensor * wordProbs)
 {
     XTensor probs;
-    InitTensorV2(&probs, &output);
+    InitTensor(&probs, &output);
     /* probs[i,j] = output[i,j] * gold[i,j] */
     Multiply(output, gold, probs);
     /* probability of each word */
     XTensor wprobs;
-    InitTensor1DV2(&wprobs, output.GetDim(0), output.dataType, output.devID);
+    InitTensor1D(&wprobs, output.GetDim(0), output.dataType, output.devID);
     ReduceSum(probs, wprobs, 1);
     if(wordProbs != NULL)
         CopyValues(wprobs, *wordProbs);
@@ -615,7 +615,7 @@ float GetProb(XTensor &output, XTensor &gold, XTensor * wordProbs)
     /* probability for the batch */
     XTensor result;
-    InitTensor1DV2(&result, 1, X_FLOAT, output.devID);
+    InitTensor1D(&result, 1, X_FLOAT, output.devID);
     ReduceSum(probs, result, 1);
     return result.Get1D(0);
@@ -716,7 +716,7 @@ The indexed cell is set to 1, and 0 otherwise.
 void InitZeroOneTensor2D(XTensor &tensor, int rowNum, int colNum, int * rows, int * cols,
                          int itemNum, int devID)
 {
-    InitTensor2DV2(&tensor, rowNum, colNum, X_FLOAT, devID);
+    InitTensor2D(&tensor, rowNum, colNum, X_FLOAT, devID);
     tensor.SetZeroAll();
@@ -808,7 +808,7 @@ void Forward(XTensor inputs[], XTensor &output, FNNModel &model, FNNNet &net)
     /* make a 2d tensor for the bias term */
     XTensor b2D;
-    InitTensorV2(&b2D, &s);
+    InitTensor(&b2D, &s);
     Unsqueeze(b, b2D, 0, batchSize);
     /* introduce bias term:
@@ -840,7 +840,7 @@ void Forward(XTensor inputs[], XTensor &output, FNNModel &model, FNNNet &net)
     MatrixMul(h_last, X_NOTRANS, w, X_NOTRANS, s);
     XTensor b2D;
-    InitTensorV2(&b2D, &s);
+    InitTensor(&b2D, &s);
     Unsqueeze(b, b2D, 0, batchSize);
     Sum(s, b2D, s);
@@ -905,8 +905,8 @@ void Backward(XTensor inputs[], XTensor &output, XTensor &gold, LOSS_FUNCTION_NA
     XTensor dedsHidden;
     XTensor dedxBottom;
     if (depth > 0)
-        InitTensorV2(&dedsHidden, &dedx);
-    InitTensorV2(&dedxBottom, &net.embeddingCat);
+        InitTensor(&dedsHidden, &dedx);
+    InitTensor(&dedxBottom, &net.embeddingCat);
     /* back-propagation from top to bottom in the stack of hidden layers
        for each layer, h = f(s)
@@ -944,7 +944,7 @@ void Backward(XTensor inputs[], XTensor &output, XTensor &gold, LOSS_FUNCTION_NA
     /* back-propagation for the embedding layer */
     for (int i = 0; i < n - 1; i++) {
-        XTensor * dedy = NewTensor2DV2(batchSize, model.eSize, X_FLOAT, model.devID);
+        XTensor * dedy = NewTensor2D(batchSize, model.eSize, X_FLOAT, model.devID);
         eList.Add(dedy);
     }
@@ -996,7 +996,7 @@ void ForwardAutoDiff(NGram * ngrams, int batch, XTensor &output, FNNModel &model
         }
     }
-    InitTensor1DV2(&words, size, X_INT, model.devID);
+    InitTensor1D(&words, size, X_INT, model.devID);
     words.SetData(index, size);
     embeddingBig = Gather(model.embeddingW, words);
@@ -1176,7 +1176,7 @@ void Test(const char * test, const char * result, FNNModel &model)
     /* prediction probabilities */
     XTensor probs;
-    InitTensor1DV2(&probs, ngramNum);
+    InitTensor1D(&probs, ngramNum);
     /* get probabilities */
     float prob = GetProb(output, gold, &probs);
source/sample/transformer/T2TAttention.cpp

@@ -69,11 +69,11 @@ void T2TAttention::InitModel(int argc, char ** argv,
     LoadParamFloat(argc, argv, "attminmax", &minmax, 0.1F);
     LoadParamFloat(argc, argv, "dropoutatt", &dropoutP, 0);
-    InitTensor2DV2(&wk, d, dk, X_FLOAT, devID);
-    InitTensor2DV2(&wq, d, dk, X_FLOAT, devID);
-    InitTensor2DV2(&wv, d, dv, X_FLOAT, devID);
-    InitTensor2DV2(&wa, d, d, X_FLOAT, devID);
-    InitTensor2DV2(&wbig, d, 3 * d, X_FLOAT, devID);
+    InitTensor2D(&wk, d, dk, X_FLOAT, devID);
+    InitTensor2D(&wq, d, dk, X_FLOAT, devID);
+    InitTensor2D(&wv, d, dv, X_FLOAT, devID);
+    InitTensor2D(&wa, d, d, X_FLOAT, devID);
+    InitTensor2D(&wbig, d, 3 * d, X_FLOAT, devID);
     float scale = 1.0F;
     _SetDataFanInOut(&wk, scale);
@@ -128,9 +128,9 @@ XTensor T2TAttention::MakeBig(XTensor &kqv, XTensor &mask, bool isTraining)
     int d2 = kqv2.GetDim(1);
     int d3 = kqv2.GetDim(2) / 3;
-    InitTensor3DV2(&k2, d1, d2, d3, X_FLOAT, devID);
-    InitTensor3DV2(&q2, d1, d2, d3, X_FLOAT, devID);
-    InitTensor3DV2(&v2, d1, d2, d3, X_FLOAT, devID);
+    InitTensor3D(&k2, d1, d2, d3, X_FLOAT, devID);
+    InitTensor3D(&q2, d1, d2, d3, X_FLOAT, devID);
+    InitTensor3D(&v2, d1, d2, d3, X_FLOAT, devID);
     split.Add(&q2);
     split.Add(&k2);
source/sample/transformer/T2TBatchLoader.cpp

@@ -365,11 +365,11 @@ int T2TBatchLoader::LoadBatchLM(FILE * file,
     dims[1] = max;
     dims[2] = vSize;
-    InitTensor2DV2(batchEnc, sc, max, X_INT, devID);
-    InitTensor2DV2(label, sc, max, X_INT, devID);
-    InitTensorV2(gold, 3, dims, X_FLOAT, devID);
-    InitTensor2DV2(paddingEnc, sc, max, X_FLOAT, devID);
-    InitTensor2DV2(paddingDec, sc, max, X_FLOAT, devID);
+    InitTensor2D(batchEnc, sc, max, X_INT, devID);
+    InitTensor2D(label, sc, max, X_INT, devID);
+    InitTensor(gold, 3, dims, X_FLOAT, devID);
+    InitTensor2D(paddingEnc, sc, max, X_FLOAT, devID);
+    InitTensor2D(paddingDec, sc, max, X_FLOAT, devID);
     batchEnc->SetZeroAll();
     label->SetZeroAll();
@@ -433,12 +433,12 @@ int T2TBatchLoader::LoadBatchLM(FILE * file,
     paddingEnc->SetDataBatched(paddingEncOffsets, 1.0F, wCount);
     paddingDec->SetDataBatched(paddingDecOffsets, 1.0F, wCount);
-    /*XTensor * tmp = NewTensorBufV2(paddingEnc, devID);
+    /*XTensor * tmp = NewTensorBuf(paddingEnc, devID);
     _ConvertDataType(batchEnc, tmp);
     _NotEqual(tmp, paddingEnc, 0);
     DelTensorBuf(tmp);
-    XTensor * tmp2 = NewTensorBufV2(paddingDec, devID);
+    XTensor * tmp2 = NewTensorBuf(paddingDec, devID);
     _ConvertDataType(batchEnc, tmp2);
     _NotEqual(tmp2, paddingDec, 0);
     DelTensorBuf(tmp2);*/
@@ -563,12 +563,12 @@ int T2TBatchLoader::LoadBatchMT(FILE * file,
     int sCount = sc / 2;
     int seqSize = 0;
-    InitTensor2DV2(batchEnc, sCount, maxEnc, X_INT, devID);
-    InitTensor2DV2(paddingEnc, sCount, maxEnc, X_FLOAT, devID);
-    InitTensor2DV2(batchDec, sCount, maxDec, X_INT, devID);
-    InitTensor2DV2(paddingDec, sCount, maxDec, X_FLOAT, devID);
-    InitTensor2DV2(label, sCount, maxDec, X_INT, devID);
-    //InitTensorV2(gold, 3, dimsDec, X_FLOAT, devID);
+    InitTensor2D(batchEnc, sCount, maxEnc, X_INT, devID);
+    InitTensor2D(paddingEnc, sCount, maxEnc, X_FLOAT, devID);
+    InitTensor2D(batchDec, sCount, maxDec, X_INT, devID);
+    InitTensor2D(paddingDec, sCount, maxDec, X_FLOAT, devID);
+    InitTensor2D(label, sCount, maxDec, X_INT, devID);
+    //InitTensor(gold, 3, dimsDec, X_FLOAT, devID);
     batchEnc->SetZeroAll();
     paddingEnc->SetZeroAll();
@@ -607,7 +607,7 @@ int T2TBatchLoader::LoadBatchMT(FILE * file,
     ws = wCountEnc;
     batchEnc->SetData(batchEncValues, batchEnc->unitNum);
     paddingEnc->SetDataBatched(paddingEncOffsets, 1.0F, wCountEnc);
-    //XTensor * tmp = NewTensorBufV2(paddingEnc, devID);
+    //XTensor * tmp = NewTensorBuf(paddingEnc, devID);
     //_ConvertDataType(batchEnc, tmp);
     //tmp->Dump(stderr, "tmp:");
     //_NotEqual(tmp, paddingEnc, 0);
@@ -656,7 +656,7 @@ int T2TBatchLoader::LoadBatchMT(FILE * file,
     label->SetData(labelValues, label->unitNum);
     paddingDec->SetDataBatched(paddingDecOffsets, 1.0F, wCountPad);
-    //XTensor * tmp2 = NewTensorBufV2(paddingDec, devID);
+    //XTensor * tmp2 = NewTensorBuf(paddingDec, devID);
     //_ConvertDataType(batchDec, tmp2);
     //_NotEqual(tmp2, paddingDec, 0);
     //DelTensorBuf(tmp2);
source/sample/transformer/T2TEmbedding.cpp

@@ -61,7 +61,7 @@ void T2TEmbedder::InitModel(int argc, char ** argv, int myDevID, bool isEnc)
     LoadParamInt(argc, argv, "d", &eSize, DEFAULT_EMBEDDING_SIZE);
     LoadParamInt(argc, argv, "d", &d, DEFAULT_EMBEDDING_SIZE);
-    InitTensor2DV2(&w, vSize, eSize, X_FLOAT, devID);
+    InitTensor2D(&w, vSize, eSize, X_FLOAT, devID);
     DTYPE v = 1.0F / (float)sqrt((float)eSize);
     w.SetDataRandn(0, v);
@@ -78,7 +78,7 @@ make positional embeddings (of size eSize * length)
 */
 void T2TEmbedder::MakePosEmbedding(int eSize, int d, int length)
 {
-    InitTensor2DV2(&posEmbeddingBase, length, eSize, X_FLOAT, devID);
+    InitTensor2D(&posEmbeddingBase, length, eSize, X_FLOAT, devID);
     float * data = new float[posEmbeddingBase.unitNum];
@@ -142,9 +142,9 @@ XTensor T2TEmbedder::Make(XTensor &input)
     /* we make positional embeddings first */
     //if(!match){
     if(true){
-        InitTensorV2(&posEmbedding, input.order + 1, dims, X_FLOAT, devID);
-        XTensor * posTMP = NewTensorBufV2(2, dims + 1, X_FLOAT, devID);
+        InitTensor(&posEmbedding, input.order + 1, dims, X_FLOAT, devID);
+        XTensor * posTMP = NewTensorBuf(2, dims + 1, X_FLOAT, devID);
         _CopyValues(&posEmbeddingBase, 0, posTMP->unitNum, posTMP, 0);
         _Unsqueeze(posTMP, &posEmbedding, 0, dims[0]);
source/sample/transformer/T2TFNN.cpp

@@ -60,11 +60,11 @@ void T2TFNN::InitModel(int argc, char ** argv, int myDevID)
     LoadParamFloat(argc, argv, "fnnminmax", &minmax, 0.1F);
     LoadParamFloat(argc, argv, "dropoutfnn", &dropoutP, 0);
-    InitTensor2DV2(&w1, inSize, hSize, X_FLOAT, devID);
-    InitTensor1DV2(&b1, hSize, X_FLOAT, devID);
-    InitTensor2DV2(&w2, hSize, outSize, X_FLOAT, devID);
-    InitTensor1DV2(&b2, outSize, X_FLOAT, devID);
+    InitTensor2D(&w1, inSize, hSize, X_FLOAT, devID);
+    InitTensor1D(&b1, hSize, X_FLOAT, devID);
+    InitTensor2D(&w2, hSize, outSize, X_FLOAT, devID);
+    InitTensor1D(&b2, outSize, X_FLOAT, devID);
     float scale = 1.0F;
     _SetDataFanInOut(&w1, scale);
source/sample/transformer/T2TLayerNormal.cpp

@@ -53,8 +53,8 @@ void T2TLN::InitModel(int argc, char ** argv, int myDevID)
     d = 0;
     LoadParamInt(argc, argv, "d", &d, DEFAULT_EMBEDDING_SIZE);
-    InitTensor1DV2(&w, d, X_FLOAT, devID);
-    InitTensor1DV2(&b, d, X_FLOAT, devID);
+    InitTensor1D(&w, d, X_FLOAT, devID);
+    InitTensor1D(&b, d, X_FLOAT, devID);
     w.SetDataRand(1.0F, 1.0F);
     b.SetZeroAll();
source/sample/transformer/T2TModel.cpp

@@ -132,7 +132,7 @@ void T2TModel::MakeLM(XTensor &input, XTensor &output, XTensor &padding, bool is
     dims[0] = nhead;
     dims[input.order + 1] = len;
     XTensor mask;
-    InitTensorV2(&mask, input.order + 2, dims, X_FLOAT, padding.devID);
+    InitTensor(&mask, input.order + 2, dims, X_FLOAT, padding.devID);
     /* a upper triangular matrix where the cells of the upper triangular are set to -1e-9.
        this matrix can be used to prevent the attention to current or following words in
@@ -146,14 +146,14 @@ void T2TModel::MakeLM(XTensor &input, XTensor &output, XTensor &padding, bool is
     dimsPadding[padding.order - 1] = padding.GetDim(-1);
     dimsPadding[padding.order] = padding.GetDim(-1);
-    XTensor * padding2 = NewTensorBufV2(padding.order + 1, dimsPadding, padding.dataType,
+    XTensor * padding2 = NewTensorBuf(padding.order + 1, dimsPadding, padding.dataType,
                                         padding.devID);
     for (int i = 0; i < padding2->order; i++)
         dimsPadding[i + 1] = padding2->GetDim(i);
     dimsPadding[0] = nhead;
-    //XTensor * padding3 = NewTensorBufV2(padding.order + 2, dimsPadding, padding.dataType,
+    //XTensor * padding3 = NewTensorBuf(padding.order + 2, dimsPadding, padding.dataType,
     //                                    padding.devID);
     //
     ///* mask of the padding */
@@ -224,7 +224,7 @@ void T2TModel::MakeMTMask(XTensor &inputEnc, XTensor &inputDec,
     dims[i + 1] = inputDec.GetDim(i);
     dims[0] = nhead;
     dims[inputDec.order + 1] = len;
-    InitTensorV2(&maskDec, inputDec.order + 2, dims, X_FLOAT, paddingDec.devID);
+    InitTensor(&maskDec, inputDec.order + 2, dims, X_FLOAT, paddingDec.devID);
     /* an upper triangular matrix where the cells of the upper triangular are set to -1e-9.
        this matrix can be used to prevent the attention to current or following words in
@@ -234,11 +234,11 @@ void T2TModel::MakeMTMask(XTensor &inputEnc, XTensor &inputDec,
     /* encoder-decoder mask that prevents the attention to padding dummy words */
     dims[inputDec.order + 1] = inputEnc.GetDim(inputEnc.order - 1);
-    InitTensorV2(&maskEncDec, inputDec.order + 2, dims, X_FLOAT, paddingEnc.devID);
+    InitTensor(&maskEncDec, inputDec.order + 2, dims, X_FLOAT, paddingEnc.devID);
-    XTensor * maskEncDecTMPEnc = NewTensorBufV2(paddingEnc.order + 1, dims + 1, paddingEnc.dataType,
+    XTensor * maskEncDecTMPEnc = NewTensorBuf(paddingEnc.order + 1, dims + 1, paddingEnc.dataType,
                                                 paddingEnc.devID);
-    XTensor * maskEncDecTMPDec = NewTensorBufV2(maskEncDecTMPEnc, paddingEnc.devID);
+    XTensor * maskEncDecTMPDec = NewTensorBuf(maskEncDecTMPEnc, paddingEnc.devID);
     _Unsqueeze(&paddingEnc, maskEncDecTMPEnc, paddingEnc.order - 1, paddingDec.GetDim(-1));
     _ScaleAndShiftMe(maskEncDecTMPEnc, 1e9F, -1e9F);
@@ -254,14 +254,14 @@ void T2TModel::MakeMTMask(XTensor &inputEnc, XTensor &inputDec,
     dimsPadding[paddingEnc.order - 1] = paddingEnc.GetDim(-1);
     dimsPadding[paddingEnc.order] = paddingEnc.GetDim(-1);
-    XTensor * padding2 = NewTensorBufV2(paddingEnc.order + 1, dimsPadding, paddingEnc.dataType,
+    XTensor * padding2 = NewTensorBuf(paddingEnc.order + 1, dimsPadding, paddingEnc.dataType,
                                         paddingEnc.devID);
     for (int i = 0; i < padding2->order; i++)
         dimsPadding[i + 1] = padding2->GetDim(i);
     dimsPadding[0] = nhead;
-    XTensor * padding3 = NewTensorBufV2(paddingEnc.order + 2, dimsPadding, paddingEnc.dataType,
+    XTensor * padding3 = NewTensorBuf(paddingEnc.order + 2, dimsPadding, paddingEnc.dataType,
                                         paddingEnc.devID);
     /* mask of the padding */
@@ -270,7 +270,7 @@ void T2TModel::MakeMTMask(XTensor &inputEnc, XTensor &inputDec,
     _ScaleAndShiftMe(padding3, 1e9F, -1e9F);
-    InitTensorV2(&maskEnc, padding3);
+    InitTensor(&maskEnc, padding3);
     maskEnc.SetZeroAll();
     /* generate the mask on the source language side (for padding) */
@@ -298,14 +298,14 @@ void T2TModel::MakeMTMaskEnc(XTensor &inputEnc, XTensor &paddingEnc, XTensor &ma
     dimsPadding[paddingEnc.order - 1] = paddingEnc.GetDim(-1);
     dimsPadding[paddingEnc.order] = paddingEnc.GetDim(-1);
-    XTensor * padding2 = NewTensorBufV2(paddingEnc.order + 1, dimsPadding, paddingEnc.dataType,
+    XTensor * padding2 = NewTensorBuf(paddingEnc.order + 1, dimsPadding, paddingEnc.dataType,
                                         paddingEnc.devID);
     for (int i = 0; i < padding2->order; i++)
         dimsPadding[i + 1] = padding2->GetDim(i);
     dimsPadding[0] = nhead;
-    XTensor * padding3 = NewTensorBufV2(paddingEnc.order + 2, dimsPadding, paddingEnc.dataType,
+    XTensor * padding3 = NewTensorBuf(paddingEnc.order + 2, dimsPadding, paddingEnc.dataType,
                                         paddingEnc.devID);
     /* mask of the padding */
@@ -314,7 +314,7 @@ void T2TModel::MakeMTMaskEnc(XTensor &inputEnc, XTensor &paddingEnc, XTensor &ma
     _ScaleAndShiftMe(padding3, 1e9F, -1e9F);
-    InitTensorV2(&maskEnc, padding3);
+    InitTensor(&maskEnc, padding3);
     maskEnc.SetZeroAll();
     /* generate the mask on the source language side (for padding) */
@@ -344,7 +344,7 @@ void T2TModel::MakeMTMaskDec(XTensor &inputEnc, XTensor &inputDec,
     dims[i + 1] = inputDec.GetDim(i);
     dims[0] = nhead;
     dims[inputDec.order + 1] = len;
-    InitTensorV2(&maskDec, inputDec.order + 2, dims, X_FLOAT, paddingDec.devID);
+    InitTensor(&maskDec, inputDec.order + 2, dims, X_FLOAT, paddingDec.devID);
     /* An upper triangular matrix where the cells of the upper triangular are set to -1e-9.
        This matrix can be used to block the attention to current or following words in
@@ -359,11 +359,11 @@ void T2TModel::MakeMTMaskDec(XTensor &inputEnc, XTensor &inputDec,
     /* encoder-decoder mask that prevents the attention to padding dummy words */
     dims[inputDec.order + 1] = inputEnc.GetDim(inputEnc.order - 1);
-    InitTensorV2(&maskEncDec, inputDec.order + 2, dims, X_FLOAT, paddingEnc.devID);
+    InitTensor(&maskEncDec, inputDec.order + 2, dims, X_FLOAT, paddingEnc.devID);
-    XTensor * maskEncDecTMPEnc = NewTensorBufV2(paddingEnc.order + 1, dims + 1, paddingEnc.dataType,
+    XTensor * maskEncDecTMPEnc = NewTensorBuf(paddingEnc.order + 1, dims + 1, paddingEnc.dataType,
                                                 paddingEnc.devID);
-    XTensor * maskEncDecTMPDec = NewTensorBufV2(maskEncDecTMPEnc, paddingEnc.devID);
+    XTensor * maskEncDecTMPDec = NewTensorBuf(maskEncDecTMPEnc, paddingEnc.devID);
     _Unsqueeze(&paddingEnc, maskEncDecTMPEnc, paddingEnc.order - 1, paddingDec.GetDim(-1));
source/sample/transformer/T2TOutput.cpp

@@ -58,7 +58,7 @@ void T2TOutput::InitModel(int argc, char ** argv, int myDevID)
     LoadParamInt(argc, argv, "d", &hSize, DEFAULT_EMBEDDING_SIZE);
     LoadParamFloat(argc, argv, "outputminmax", &minmax, 0.08F);
-    InitTensor2DV2(&w, hSize, vSize, X_FLOAT, devID);
+    InitTensor2D(&w, hSize, vSize, X_FLOAT, devID);
     float scale = 1.0F;
     float finfout = (float)sqrt(6.0F * scale / (hSize + vSize));
source/sample/transformer/T2TPredictor.cpp

@@ -105,9 +105,9 @@ void T2TPredictor::Create(T2TModel * model, XTensor * top, const XTensor * input
     dims[i] = input->GetDim(i);
     dims[input->order - 1] = beamSize;
-    InitTensorV2(&state->probPath, input->order, dims, X_FLOAT, input->devID);
-    InitTensorV2(&state->nstep, input->order, dims, X_FLOAT, input->devID);
-    InitTensorV2(&state->endMark, input->order, dims, X_INT, input->devID);
+    InitTensor(&state->probPath, input->order, dims, X_FLOAT, input->devID);
+    InitTensor(&state->nstep, input->order, dims, X_FLOAT, input->devID);
+    InitTensor(&state->endMark, input->order, dims, X_INT, input->devID);
     state->probPath.SetZeroAll();
     state->nstep.SetZeroAll();
@@ -170,7 +170,7 @@ void T2TPredictor::Predict(T2TStateBundle * next, XTensor * encoding,
     dims[i] = inputEnc->GetDim(i);
     dims[inputEnc->order - 1] = 1;
-    InitTensorV2(&first, inputEnc->order, dims, X_INT, inputEnc->devID);
+    InitTensor(&first, inputEnc->order, dims, X_INT, inputEnc->devID);
     _SetDataFixedInt(&first, startSymbol);
     /* add a new word into the input sequence of the decoder side */
@@ -194,7 +194,7 @@ void T2TPredictor::Predict(T2TStateBundle * next, XTensor * encoding,
     dims[inputDec.order - 1] = inputDec.GetDim(-1);
     XTensor paddingDec;
-    InitTensorV2(&paddingDec, inputDec.order, dims, X_INT, paddingEnc->devID);
+    InitTensor(&paddingDec, inputDec.order, dims, X_INT, paddingEnc->devID);
     SetDataFixedInt(paddingDec, 1);
     XTensor maskDec;
@@ -213,8 +213,8 @@ void T2TPredictor::Predict(T2TStateBundle * next, XTensor * encoding,
     int stride = decoding.GetDim(decoding.order - 2);
-    InitTensor1DV2(&selectSrc, 1, X_INT);
-    InitTensor1DV2(&selectTgt, 1, X_INT);
+    InitTensor1D(&selectSrc, 1, X_INT);
+    InitTensor1D(&selectTgt, 1, X_INT);
     selectSrc.SetInt(stride - 1, 0);
     selectTgt.SetInt(0, 0);
@@ -257,7 +257,7 @@ XTensor T2TPredictor::GeneratePaths(T2TStateBundle * state)
     }
     XTensor path;
-    InitTensor2DV2(&path, state->stateNum, distance, X_INT);
+    InitTensor2D(&path, state->stateNum, distance, X_INT);
     path.SetZeroAll();
     for(int i = 0; i < state->stateNum; i++){
source/sample/transformer/T2TSearch.cpp

@@ -192,8 +192,8 @@ void T2TSearch::Score(T2TStateBundle * prev, T2TStateBundle * beam)
     for (int i = 0; i < order; i++)
         dims[i] = prob.GetDim(i);
-    InitTensorV2(&score, &prob);
-    InitTensorV2(&probPath, &prob);
+    InitTensor(&score, &prob);
+    InitTensor(&probPath, &prob);
     prob.Reshape(prob.unitNum / outputSize, outputSize);
     score.Reshape(score.unitNum / outputSize, outputSize);
@@ -204,8 +204,8 @@ void T2TSearch::Score(T2TStateBundle * prev, T2TStateBundle * beam)
     _SumDim(&prob, &probPathPrev, &probPath, 0);
-    InitTensorV2(&len, &lenPrev);
-    InitTensorV2(&lp, &lenPrev);
+    InitTensor(&len, &lenPrev);
+    InitTensor(&lp, &lenPrev);
     _ScaleAndShift(&lenPrev, &len, 1.0F, 1.0F);
@@ -225,7 +225,7 @@ void T2TSearch::Score(T2TStateBundle * prev, T2TStateBundle * beam)
         _SumDim(&score, &firstMask, &score, 0);
     }
-    InitTensorV2(&mask,
+    InitTensor(&mask,
                prev->endMark.order, prev->endMark.dimSize, X_FLOAT,
                prev->endMark.devID);
     _SetDataFixedCond(&mask, &prev->endMark, -1e9F);
@@ -279,11 +279,11 @@ void T2TSearch::Generate(T2TStateBundle * beam)
     dimsTopK[order - 3] = dimsBeam[order - 3];
     dimsTopK[order - 1] = beamSize;
-    InitTensorV2(&scoreTopK, order, dimsTopK, score.dataType,
+    InitTensor(&scoreTopK, order, dimsTopK, score.dataType,
                score.devID);
-    InitTensorV2(&index, order, dimsTopK, X_INT,
+    InitTensor(&index, order, dimsTopK, X_INT,
                score.devID);
-    InitTensorV2(&preID, order, dimsTopK, X_INT, -1);
+    InitTensor(&preID, order, dimsTopK, X_INT, -1);
     score.Reshape(order, dimsBeam);
@@ -307,13 +307,13 @@ void T2TSearch::Generate(T2TStateBundle * beam)
     score.Reshape(order, dims);
     /* we keep the top-k scores */
-    InitTensorV2(&score, &scoreTopK);
+    InitTensor(&score, &scoreTopK);
     CopyValues(scoreTopK, score);
     /* CPU data (TODO: remove GPU->CPU data copy!!!) */
     XTensor indexGPU;
     indexGPU = CopyValues(index);
-    //InitTensor(&indexCPU, index.order, index.dimSize, index.dataType, index.denseRatio, -1);
+    //InitTensorV2(&indexCPU, index.order, index.dimSize, index.dataType, index.denseRatio, -1);
     //CopyValues(index, indexCPU);
     for (int i = 0; i < indexGPU.unitNum; i++)
@@ -323,9 +323,9 @@ void T2TSearch::Generate(T2TStateBundle * beam)
     /* sequence probability of top-k candidates */
     XTensor probPathTopK;
-    InitTensorV2(&probPathTopK, &scoreTopK);
+    InitTensor(&probPathTopK, &scoreTopK);
     XTensor probTopK;
-    InitTensorV2(&probTopK, &scoreTopK);
+    InitTensor(&probTopK, &scoreTopK);
     for (int i = 0; i < probPath.order; i++) {
         dims[i] = probPath.GetDim(i);
@@ -381,7 +381,7 @@ void T2TSearch::Expand(T2TStateBundle * prev, T2TStateBundle * beam)
     InitTensorOnCPU(&probPath, &probPathRef);
     InitTensorOnCPU(&prediction, &predictionRef);
     InitTensorOnCPU(&endMarkCPU, &predictionRef);
-    InitTensorV2(&endMark, &predictionRef);
+    InitTensor(&endMark, &predictionRef);
     /* we copy the data to CPU because the frequent access to GPU is slow
        and we can speed-up the process by doing the job on CPU. */
@@ -502,7 +502,7 @@ void T2TSearch::Dump(XTensor * output)
     int dims[3] = {batchSize, beamSize, maxLength};
     int * words = new int[maxLength];
-    InitTensorV2(output, 3, dims, X_INT);
+    InitTensor(output, 3, dims, X_INT);
     SetDataFixedInt(*output, -1);
     /* heap for an input sentence in the batch */
@@ -587,7 +587,7 @@ XTensor T2TSearch::MakeFirstMask(T2TStateBundle * beam)
     for (int i = 0; i < order - 1; i++)
         dims[i] = prob.GetDim(i);
-    InitTensorV2(&mask, order - 1, dims, X_FLOAT);
+    InitTensor(&mask, order - 1, dims, X_FLOAT);
     mask.SetZeroAll();
     for (int i = 0; i < mask.unitNum; i++) {
source/sample/transformer/T2TTrainer.cpp

@@ -367,7 +367,7 @@ void T2TTrainer::Test(const char * fn, const char * ofn, T2TModel * model)
     /* prediction probabilities */
     XTensor probs;
-    InitTensor1DV2(&probs, bSize * length);
+    InitTensor1D(&probs, bSize * length);
     XTensor labelOnehot;
@@ -452,13 +452,13 @@ get word probabilities for a batch of sequences
 float T2TTrainer::GetProb(XTensor * output, XTensor * gold, XTensor * wordProbs)
 {
     XTensor probs;
-    InitTensor(&probs, output);
+    InitTensorV2(&probs, output);
     _Multiply(output, gold, &probs);
     /* probability of each word */
     XTensor wprobs;
-    InitTensor1DV2(&wprobs, output->unitNum / output->GetDim(-1), X_FLOAT, output->devID);
+    InitTensor1D(&wprobs, output->unitNum / output->GetDim(-1), X_FLOAT, output->devID);
     int dims[2] = {output->unitNum / output->GetDim(-1), output->GetDim(-1)};
     probs.Reshape(2, dims);
@@ -475,7 +475,7 @@ float T2TTrainer::GetProb(XTensor * output, XTensor * gold, XTensor * wordProbs)
     /* probability for the batch */
     XTensor result;
-    InitTensor1DV2(&result, 1, X_FLOAT, output->devID);
+    InitTensor1D(&result, 1, X_FLOAT, output->devID);
     _ReduceSum(&probs, &result, 1);
     return result.Get1D(0);
@@ -522,7 +522,7 @@ void T2TTrainer::Update(T2TModel * model, const float lr)
     _ScaleAndShiftMe(v, (1.0F - adamBeta2), 0);
     /* v2 = m / (sqrt(v) + delta) */
-    XTensor * v2 = NewTensorBufV2(v, v->devID);
+    XTensor * v2 = NewTensorBuf(v, v->devID);
     _Power(v, v2, 0.5F);
     _ScaleAndShiftMe(v2, 1.0F, d);
     _Div(m, v2, v2);
@@ -593,7 +593,7 @@ void T2TTrainer::PadOutput(XTensor * output, XTensor * gold, XTensor * padding)
     output->Reshape(output->unitNum / dimso[output->order - 1], dimso[output->order - 1]);
-    XTensor * padding2 = NewTensorBufV2(1, &padding->unitNum, X_FLOAT, padding->devID);
+    XTensor * padding2 = NewTensorBuf(1, &padding->unitNum, X_FLOAT, padding->devID);
     _CopyValues(padding, padding2);
     _MultiplyDim(output, padding2, output, 0);
@@ -647,7 +647,7 @@ void T2TTrainer::LabelSmooth(XTensor * gold, XTensor * smoothed, DTYPE p)
     DTYPE q = 1.0F - p;
     DTYPE gift = p / n;
-    InitTensorV2(smoothed, gold);
+    InitTensor(smoothed, gold);
     _CopyValues(gold, smoothed);
     if (p == 0)
source/tensor/XCall.cpp

(Diff collapsed.)
source/tensor/XCall.h

(Diff collapsed.)
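The XCall.cpp and XCall.h diffs themselves are collapsed above, but the renamed call sites elsewhere in this commit suggest the shape of the swap: the lean initializers keep the plain names, while the variants that still take a denseRatio and an XMem pool move to the V2 suffix. A rough sketch of what the two InitTensor2D overloads might look like after this commit; parameter names and defaults are assumptions inferred from the call sites, not copied from XCall.h:

/* sketch only: inferred from call sites in this commit, not taken from XCall.h */
void InitTensor2D(XTensor * tensor, const int rowNum, const int colNum,
                  const TENSOR_DATA_TYPE dataType = X_FLOAT, const int devID = -1);

void InitTensor2DV2(XTensor * tensor, const int rowNum, const int colNum,
                    const TENSOR_DATA_TYPE dataType = X_FLOAT, const float denseRatio = 1.0F,
                    const int devID = -1, XMem * mem = NULL);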
source/tensor/XTensor.cpp

@@ -103,7 +103,7 @@ XTensor::XTensor(const XTensor * reference)
     SetDataPointer();
     id = MakeTensorID();
-    InitTensor(this, reference);
+    InitTensorV2(this, reference);
 }
 /*
@@ -175,7 +175,7 @@ XTensor::XTensor(const XTensor &reference)
     else{
         devID = reference.devID;
         mem = reference.mem;
-        InitTensor(this, &reference);
+        InitTensorV2(this, &reference);
         _CopyValues(&reference, this);
     }
source/tensor/core/arithmetic/Div.cpp

@@ -252,7 +252,7 @@ where i is the index of the item
 void Div(const XTensor &a, const XTensor &b, XTensor &c, DTYPE alpha, int leadingDim)
 {
     if (!c.isInit || !IsSameShaped(a, c)) {
-        InitTensor(&c, &a);
+        InitTensorV2(&c, &a);
     }
     int n = GetDivDimIndex(a, b);
source/tensor/core/arithmetic/DivDim.cpp

@@ -190,7 +190,7 @@ i.e., a is divided with b by broadcasting
 void DivDim(const XTensor &a, const XTensor &b, XTensor &c, int n, DTYPE alpha)
 {
     if (!c.isInit || !IsSameShaped(a, c)) {
-        InitTensor(&c, &a);
+        InitTensorV2(&c, &a);
     }
     /* call _Div function */
source/tensor/core/arithmetic/Mask.cpp

@@ -173,7 +173,7 @@ where i is the index of the element
 void Mask(const XTensor &a, const XTensor &mask, XTensor &c, DTYPE alpha)
 {
     if (!c.isInit || !IsSameShaped(a, c)) {
-        InitTensor(&c, &a);
+        InitTensorV2(&c, &a);
     }
     /* call _Mask function */
source/tensor/core/arithmetic/MatrixMul.cpp
查看文件 @
823abb4f
...
@@ -65,8 +65,8 @@ void _MatrixMul(const XTensor * a, MATRIX_TRANS_TYPE transposedA,
...
@@ -65,8 +65,8 @@ void _MatrixMul(const XTensor * a, MATRIX_TRANS_TYPE transposedA,
if
(
transposedA
==
X_NOTRANS
&&
a
->
order
>
2
&&
b
->
order
==
2
){
if
(
transposedA
==
X_NOTRANS
&&
a
->
order
>
2
&&
b
->
order
==
2
){
int
ncolA
=
a
->
dimSize
[
a
->
order
-
1
];
int
ncolA
=
a
->
dimSize
[
a
->
order
-
1
];
int
ncolC
=
c
->
dimSize
[
c
->
order
-
1
];
int
ncolC
=
c
->
dimSize
[
c
->
order
-
1
];
XTensor
*
a2
=
NewTensor2D
(
a
->
unitNum
/
ncolA
,
-
ncolA
,
a
->
dataType
,
a
->
devID
,
a
->
mem
);
XTensor
*
a2
=
NewTensor2D
V2
(
a
->
unitNum
/
ncolA
,
-
ncolA
,
a
->
dataType
,
a
->
devID
,
a
->
mem
);
XTensor
*
c2
=
NewTensor2D
(
c
->
unitNum
/
ncolC
,
-
ncolC
,
c
->
dataType
,
c
->
devID
,
c
->
mem
);
XTensor
*
c2
=
NewTensor2D
V2
(
c
->
unitNum
/
ncolC
,
-
ncolC
,
c
->
dataType
,
c
->
devID
,
c
->
mem
);
a2
->
data
=
a
->
data
;
a2
->
data
=
a
->
data
;
c2
->
data
=
c
->
data
;
c2
->
data
=
c
->
data
;
_MatrixMul2D
(
a2
,
transposedA
,
b
,
transposedB
,
c2
,
alpha
,
beta
,
parallelRunner
);
_MatrixMul2D
(
a2
,
transposedA
,
b
,
transposedB
,
c2
,
alpha
,
beta
,
parallelRunner
);
...
@@ -127,9 +127,9 @@ void _MatrixMul(const XTensor * a, MATRIX_TRANS_TYPE transposedA,
...
@@ -127,9 +127,9 @@ void _MatrixMul(const XTensor * a, MATRIX_TRANS_TYPE transposedA,
CheckNTErrors
((
bRealBlockSize
*
q
<
b
->
unitNum
*
b
->
unitSize
),
"Something wrong!"
);
CheckNTErrors
((
bRealBlockSize
*
q
<
b
->
unitNum
*
b
->
unitSize
),
"Something wrong!"
);
CheckNTErrors
((
cRealBlockSize
*
(
p
*
bBlockNum
+
q
)
<
c
->
unitNum
*
c
->
unitSize
),
"Something wrong!"
);
CheckNTErrors
((
cRealBlockSize
*
(
p
*
bBlockNum
+
q
)
<
c
->
unitNum
*
c
->
unitSize
),
"Something wrong!"
);
XTensor
*
ai
=
NewTensor
(
2
,
aDimSize
,
a
->
dataType
,
a
->
denseRatio
,
a
->
devID
,
a
->
mem
);
XTensor
*
ai
=
NewTensor
V2
(
2
,
aDimSize
,
a
->
dataType
,
a
->
denseRatio
,
a
->
devID
,
a
->
mem
);
XTensor
*
bi
=
NewTensor
(
2
,
bDimSize
,
b
->
dataType
,
b
->
denseRatio
,
b
->
devID
,
b
->
mem
);
XTensor
*
bi
=
NewTensor
V2
(
2
,
bDimSize
,
b
->
dataType
,
b
->
denseRatio
,
b
->
devID
,
b
->
mem
);
XTensor
*
ci
=
NewTensor
(
2
,
cDimSize
,
c
->
dataType
,
c
->
denseRatio
,
c
->
devID
,
c
->
mem
);
XTensor
*
ci
=
NewTensor
V2
(
2
,
cDimSize
,
c
->
dataType
,
c
->
denseRatio
,
c
->
devID
,
c
->
mem
);
ai
->
data
=
ap
;
ai
->
data
=
ap
;
bi
->
data
=
bp
;
bi
->
data
=
bp
;
ci
->
data
=
cp
;
ci
->
data
=
cp
;
...
@@ -337,7 +337,7 @@ void MatrixMul(const XTensor &a, MATRIX_TRANS_TYPE transposedA,
...
@@ -337,7 +337,7 @@ void MatrixMul(const XTensor &a, MATRIX_TRANS_TYPE transposedA,
dimSize
[
sub
++
]
=
bm
;
dimSize
[
sub
++
]
=
bm
;
float
dr
=
(
!
a
.
isSparse
||
!
b
.
isSparse
)
?
1.0
F
:
MAX
(
a
.
denseRatio
,
b
.
denseRatio
);
float
dr
=
(
!
a
.
isSparse
||
!
b
.
isSparse
)
?
1.0
F
:
MAX
(
a
.
denseRatio
,
b
.
denseRatio
);
InitTensor
(
&
c
,
order
,
dimSize
,
a
.
dataType
,
dr
,
a
.
devID
,
a
.
mem
);
InitTensor
V2
(
&
c
,
order
,
dimSize
,
a
.
dataType
,
dr
,
a
.
devID
,
a
.
mem
);
/* destroy variables */
/* destroy variables */
delete
[]
dimSize
;
delete
[]
dimSize
;
...
@@ -435,7 +435,7 @@ void MatrixMul(const XTensor &a, const XTensor &b, XTensor &c,
...
@@ -435,7 +435,7 @@ void MatrixMul(const XTensor &a, const XTensor &b, XTensor &c,
dimSize
[
sub
++
]
=
bm
;
dimSize
[
sub
++
]
=
bm
;
float
dr
=
(
!
a
.
isSparse
||
!
b
.
isSparse
)
?
1.0
F
:
MAX
(
a
.
denseRatio
,
b
.
denseRatio
);
float
dr
=
(
!
a
.
isSparse
||
!
b
.
isSparse
)
?
1.0
F
:
MAX
(
a
.
denseRatio
,
b
.
denseRatio
);
InitTensor
(
&
c
,
order
,
dimSize
,
a
.
dataType
,
dr
,
a
.
devID
,
a
.
mem
);
InitTensor
V2
(
&
c
,
order
,
dimSize
,
a
.
dataType
,
dr
,
a
.
devID
,
a
.
mem
);
/* destroy variables */
/* destroy variables */
delete
[]
dimSize
;
delete
[]
dimSize
;
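A sketch of the 2-D "view" idiom that _MatrixMul uses above, written against the renamed constructor. This is illustrative only: the variable names are made up, the argument order follows the hunks (row count, negated column count, data type, device id, memory pool), and the detach-before-delete step is shown defensively rather than copied from the file.

// wrap an existing buffer in a temporary 2-D tensor
int rows = 4, cols = 8;
XTensor * view = NewTensor2DV2(rows, -cols, src->dataType, src->devID, src->mem);
view->data = src->data;   // share src's memory instead of allocating
/* ... run the 2-D kernel on view ... */
view->data = NULL;        // detach so deleting the view does not free src's buffer
delete view;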
source/tensor/core/arithmetic/MatrixMulBatched.cpp
@@ -191,9 +191,9 @@ void _MatrixMulBatchedCPU(const XTensor * a, MATRIX_TRANS_TYPE transposedA,
     int bDimSize[2] = {-b->dimSize[b->order - 2], b->dimSize[b->order - 1]};
     int cDimSize[2] = {-c->dimSize[c->order - 2], c->dimSize[c->order - 1]};

-    XTensor * ai = NewTensor2D(aDimSize[0], aDimSize[1], a->dataType, a->devID, a->mem);
-    XTensor * bi = NewTensor2D(bDimSize[0], bDimSize[1], b->dataType, b->devID, b->mem);
-    XTensor * ci = NewTensor2D(cDimSize[0], cDimSize[1], c->dataType, c->devID, c->mem);
+    XTensor * ai = NewTensor2DV2(aDimSize[0], aDimSize[1], a->dataType, a->devID, a->mem);
+    XTensor * bi = NewTensor2DV2(bDimSize[0], bDimSize[1], b->dataType, b->devID, b->mem);
+    XTensor * ci = NewTensor2DV2(cDimSize[0], cDimSize[1], c->dataType, c->devID, c->mem);

     for (int i = 0; i < blockNum; i++) {
         ai->data = (char*)a->data + i * aRealBlockSize;
source/tensor/core/arithmetic/MulAndShift.cpp
@@ -91,7 +91,7 @@ XTensor MulAndShift(const XTensor &x, const XTensor &w, const XTensor &b,
     float dr = (!x.isSparse || !w.isSparse) ? 1.0F : MAX(x.denseRatio, w.denseRatio);

-    XTensor * tmp = NewTensorBuf(order, dimSize, x.dataType, dr, x.devID, x.mem);
+    XTensor * tmp = NewTensorBufV2(order, dimSize, x.dataType, dr, x.devID, x.mem);

     /* call _MatrixMul function */
     _MatrixMul(&x, X_NOTRANS, &w, X_NOTRANS, tmp, alpha, 0, parallelRunner);
@@ -166,7 +166,7 @@ XTensor MulAndShift(const XTensor& x, MATRIX_TRANS_TYPE transposedA,
     float dr = (!x.isSparse || !w.isSparse) ? 1.0F : MAX(x.denseRatio, w.denseRatio);

-    XTensor * tmp = NewTensorBuf(order, dimSize, x.dataType, dr, x.devID, x.mem);
+    XTensor * tmp = NewTensorBufV2(order, dimSize, x.dataType, dr, x.devID, x.mem);

     /* call _MatrixMul function */
     _MatrixMul(&x, transposedA, &w, transposedB, tmp, alpha, 0, parallelRunner);
source/tensor/core/arithmetic/Multiply.cpp
@@ -253,7 +253,7 @@ where i is the index of the item
 void Multiply(const XTensor &a, const XTensor &b, XTensor &c, DTYPE alpha, int leadingDim)
 {
     if (!c.isInit || !IsSameShaped(a, c)) {
-        InitTensor(&c, &a);
+        InitTensorV2(&c, &a);
     }

     int n = GetMultiplyDimIndex(a, b);
source/tensor/core/arithmetic/MultiplyDim.cpp
@@ -205,7 +205,7 @@ i.e., a is multiplied with b by broadcasting
 void MultiplyDim(const XTensor &a, const XTensor &b, XTensor &c, int n)
 {
     if (!c.isInit || !IsSameShaped(a, c)) {
-        InitTensor(&c, &a);
+        InitTensorV2(&c, &a);
     }

     /* call _Multiply function */
@@ -281,8 +281,8 @@ void _MultiplyBroadcast(const XTensor * a, const XTensor * b, XTensor * c, DTYPE
             dimsS[0] = -dimsS[0];
             dimsT[0] = -dimsT[0];

-            XTensor * s = NewTensor(order - (j - i), dimsS, a->dataType, a->denseRatio, a->devID, a->mem);
-            XTensor * t = NewTensor(order - (j - i) + 1, dimsT, b->dataType, b->denseRatio, b->devID, b->mem);
+            XTensor * s = NewTensorV2(order - (j - i), dimsS, a->dataType, a->denseRatio, a->devID, a->mem);
+            XTensor * t = NewTensorV2(order - (j - i) + 1, dimsT, b->dataType, b->denseRatio, b->devID, b->mem);

             if (count == 0)
                 source = b->data;
@@ -373,7 +373,7 @@ where some of dimensions of b can be of size 1
 void MultiplyBroadcast(const XTensor &a, const XTensor &b, XTensor &c)
 {
     if (!c.isInit || !IsSameShaped(a, c)) {
-        InitTensor(&c, &a);
+        InitTensorV2(&c, &a);
     }

     /* call _SumBroadcast function */
source/tensor/core/arithmetic/Sub.cpp
@@ -225,7 +225,7 @@ tensor subtraction c = a - b * \beta
 void Sub(const XTensor &a, const XTensor &b, XTensor &c, DTYPE beta)
 {
     if (!c.isInit || !IsSameShaped(a, c)) {
-        InitTensor(&c, &a);
+        InitTensorV2(&c, &a);
     }

     int n = GetSubDimIndex(a, b);
source/tensor/core/arithmetic/SubDim.cpp
@@ -190,7 +190,7 @@ i.e., a is subtracted with b by broadcasting
 void SubDim(const XTensor &a, const XTensor &b, XTensor &c, int n, DTYPE beta)
 {
     if (!c.isInit || !IsSameShaped(a, c)) {
-        InitTensor(&c, &a);
+        InitTensorV2(&c, &a);
     }

     /* call _Sub function */
source/tensor/core/arithmetic/Sum.cpp
@@ -258,7 +258,7 @@ tensor summation c = a + b * \beta
 void Sum(const XTensor &a, const XTensor &b, XTensor &c, DTYPE beta)
 {
     if (!c.isInit || !IsSameShaped(a, c)) {
-        InitTensor(&c, &a);
+        InitTensorV2(&c, &a);
     }

     int n = GetSumDimIndex(a, b);
source/tensor/core/arithmetic/SumDim.cpp
@@ -193,7 +193,7 @@ i.e., a is summed with b by broadcasting
 void SumDim(const XTensor &a, const XTensor &b, XTensor &c, int n, DTYPE beta)
 {
     if (!c.isInit || !IsSameShaped(a, c)) {
-        InitTensor(&c, &a);
+        InitTensorV2(&c, &a);
     }

     /* call _SumDim function */
@@ -268,8 +268,8 @@ void _SumBroadcast(const XTensor * a, const XTensor * b, XTensor * c, DTYPE beta
             dimsS[0] = -dimsS[0];
             dimsT[0] = -dimsT[0];

-            XTensor * s = NewTensor(order - (j - i), dimsS, a->dataType, a->denseRatio, a->devID, a->mem);
-            XTensor * t = NewTensor(order - (j - i) + 1, dimsT, b->dataType, b->denseRatio, b->devID, b->mem);
+            XTensor * s = NewTensorV2(order - (j - i), dimsS, a->dataType, a->denseRatio, a->devID, a->mem);
+            XTensor * t = NewTensorV2(order - (j - i) + 1, dimsT, b->dataType, b->denseRatio, b->devID, b->mem);

             if (count == 0)
                 source = b->data;
@@ -362,7 +362,7 @@ c = a + b * \beta
 void SumBroadcast(const XTensor &a, const XTensor &b, XTensor &c, DTYPE beta)
 {
     if (!c.isInit || !IsSameShaped(a, c)) {
-        InitTensor(&c, &a);
+        InitTensorV2(&c, &a);
     }

     /* call _SumBroadcast function */
source/tensor/core/getandset/ConvertDataType.cpp
@@ -131,7 +131,7 @@ void ConvertDataType(const XTensor & input, XTensor & output, TENSOR_DATA_TYPE d
 {
     if (!output.isInit || input.dataType != output.dataType) {
         float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
-        InitTensor(&output, input.order, input.dimSize, dataType, dr, input.devID, input.mem);
+        InitTensorV2(&output, input.order, input.dimSize, dataType, dr, input.devID, input.mem);
     }

     _ConvertDataType(&input, &output);
source/tensor/core/getandset/OnehotAndIndex.cpp
@@ -85,7 +85,7 @@ XTensor OnehotToIndex(const XTensor & onehot, int size)
     CheckNTErrors(onehot.dataType == X_INT, "The onehot tensor must be in X_INT!")

     XTensor index;
-    InitTensor(&index, onehot.order - 1, onehot.dimSize, X_INT, 1.0F, onehot.devID, onehot.mem);
+    InitTensorV2(&index, onehot.order - 1, onehot.dimSize, X_INT, 1.0F, onehot.devID, onehot.mem);
     index.SetTMPFlag();

     _OnehotToIndex(&onehot, &index, size);
@@ -173,7 +173,7 @@ void _IndexToOnehot(int * index, int n, XTensor * onehot, int size, float labelS
         int * od = onehotData + i * stride;
         od[id] = 1;
     }*/
-    XTensor * cudaIndex = NewTensor1D(n, X_INT, onehot->devID);
+    XTensor * cudaIndex = NewTensor1DV2(n, X_INT, onehot->devID);
     cudaIndex->SetData(index, n);
     _IndexToOnehot(cudaIndex, onehot, size, labelSmoothingP);
     delete[] cudaIndex;
@@ -200,7 +200,7 @@ XTensor IndexToOnehot(const XTensor & index, int size, float labelSmoothingP)
     int * dim = new int[order + 1];
     memcpy(dim, index.dimSize, order * sizeof(int));
     dim[order] = size;
-    InitTensor(&onehot, index.order + 1, dim, X_FLOAT, 1.0F, index.devID, index.mem);
+    InitTensorV2(&onehot, index.order + 1, dim, X_FLOAT, 1.0F, index.devID, index.mem);

     _IndexToOnehot(&index, &onehot, size, labelSmoothingP);
source/tensor/core/getandset/SetData.cpp
@@ -470,7 +470,7 @@ void _SetDataLowTri(XTensor * tensor, DTYPE p, int shift)
 void _SetDataRand(XTensor * tensor, int rNum, int cNum)
 {
     if (tensor == NULL || tensor->isInit == false || tensor->order != 2) {
-        InitTensor2D(tensor, rNum, cNum);
+        InitTensor2DV2(tensor, rNum, cNum);
     }

     _SetDataRand(tensor, 0.0F, 1.0F);
@@ -519,7 +519,7 @@ void _SetDataRand(XTensor * tensor, DTYPE lower, DTYPE upper)
 #ifdef USE_CUDA
     _CudaSetDataRand(tensor, lower, upper);
 #endif
-    //XTensor * t2 = NewTensor(tensor->order, tensor->dimSize, tensor->dataType, tensor->denseRatio, -1);
+    //XTensor * t2 = NewTensorV2(tensor->order, tensor->dimSize, tensor->dataType, tensor->denseRatio, -1);
     //_SetDataRand(t2, low, high);
     //_CopyValues(t2, tensor);
     //delete t2;
source/tensor/core/math/Binary.cpp
@@ -183,7 +183,7 @@ template<class T>
 void funcName(const XTensor &a, XTensor &b, T num) \
 { \
     if (!b.isInit || !IsSameShaped(a, b)) { \
-        InitTensor(&b, &a); \
+        InitTensorV2(&b, &a); \
     } \
     _funcName(&a, &b, num); \
     if (a.enableGrad) { \
source/tensor/core/math/Clip.cpp
@@ -112,7 +112,7 @@ XTensor Clip(const XTensor & a, DTYPE lower, DTYPE upper)
 void Clip(const XTensor & a, XTensor & b, DTYPE lower, DTYPE upper)
 {
     if (!b.isInit || !IsSameShaped(a, b)) {
-        InitTensor(&b, &a);
+        InitTensorV2(&b, &a);
     }

     /* call _Clip function */
source/tensor/core/math/Compare.cpp
@@ -98,7 +98,7 @@ XTensor funcName(const XTensor &a, DTYPE number)
 void funcName(const XTensor &a, XTensor &b, DTYPE number) \
 { \
     if (!b.isInit || !IsSameShaped(a, b)) { \
-        InitTensor(&b, &a); \
+        InitTensorV2(&b, &a); \
     } \
     _funcName(&a, &b, number); \
 }
source/tensor/core/math/Normalize.cpp
@@ -204,7 +204,7 @@ void Normalize(const XTensor &input, XTensor &output, int dim,
                const XTensor &a, const XTensor &b, DTYPE epsilon)
 {
     if (!output.isInit || !IsSameShaped(input, output)) {
-        InitTensor(&output, &input);
+        InitTensorV2(&output, &input);
     }

     /* call _Normalize function */
source/tensor/core/math/ScaleAndShift.cpp
@@ -149,7 +149,7 @@ b = a * scale + shift
 void ScaleAndShift(const XTensor &a, XTensor &b, DTYPE scale, DTYPE shift)
 {
     if (!b.isInit || !IsSameShaped(a, b)) {
-        InitTensor(&b, &a);
+        InitTensorV2(&b, &a);
     }

     /* call _ScaleAndShift function */
source/tensor/core/math/Unary.cpp
@@ -162,7 +162,7 @@ XTensor funcName(const XTensor & a)
 void funcName(const XTensor & a, XTensor & b) \
 { \
     if (!b.isInit || !IsSameShaped(a, b)) { \
-        InitTensor(&b, &a); \
+        InitTensorV2(&b, &a); \
     } \
     _funcName(&a, &b); \
     if (a.enableGrad) { \
source/tensor/core/reduce/ReduceMean.cpp
@@ -111,7 +111,7 @@ void ReduceMean(const XTensor &input, XTensor &output, int dim)
     }

     float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
-    InitTensor(&output, order, dimSize, input.dataType, dr, input.devID, input.mem);
+    InitTensorV2(&output, order, dimSize, input.dataType, dr, input.devID, input.mem);

     /* destroy variables */
     delete[] dimSize;
source/tensor/core/reduce/ReduceSum.cpp
@@ -334,7 +334,7 @@ void ReduceSum(const XTensor &input, XTensor &output, int dim, const XTensor &sh
     }

     float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
-    InitTensor(&output, order, dimSize, input.dataType, dr, input.devID, input.mem);
+    InitTensorV2(&output, order, dimSize, input.dataType, dr, input.devID, input.mem);

     /* destroy variables */
     delete[] dimSize;
@@ -429,7 +429,7 @@ void ReduceSum(const XTensor &input, XTensor &output, int dim, DTYPE power, bool
     }

     float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
-    InitTensor(&output, order, dimSize, input.dataType, dr, input.devID, input.mem);
+    InitTensorV2(&output, order, dimSize, input.dataType, dr, input.devID, input.mem);

     /* destroy variables */
     delete[] dimSize;
source/tensor/core/reduce/ReduceSumAll.cpp
@@ -49,8 +49,8 @@ DTYPE _ReduceSumAll(const XTensor * source)
     int dims[2] = {1, source->unitNum};
     int one = 1;

-    XTensor * all = NewTensorBuf(2, dims, source->dataType, source->denseRatio, source->devID, source->mem);
-    XTensor * result = NewTensorBuf(1, &one, source->dataType, 1.0F, source->devID, source->mem);
+    XTensor * all = NewTensorBufV2(2, dims, source->dataType, source->denseRatio, source->devID, source->mem);
+    XTensor * result = NewTensorBufV2(1, &one, source->dataType, 1.0F, source->devID, source->mem);

     _CopyValues(source, all);
     _ReduceSum(all, result, 1);
@@ -74,7 +74,7 @@ DTYPE _ReduceSumAll(const XTensor * source)
     int leadingDim = big->order - 1;
     int * dimSize;
     dimSize = getDimSize(big, leadingDim);
-    XTensor * little = NewTensor(big->order - 1, dimSize, source->dataType, source->denseRatio,
+    XTensor * little = NewTensorV2(big->order - 1, dimSize, source->dataType, source->denseRatio,
                                  source->devID, source->mem);

     _ReduceSum(big, little, leadingDim);
source/tensor/core/reduce/ReduceSumSquared.cpp
@@ -109,7 +109,7 @@ void ReduceSumSquared(const XTensor &input, XTensor &output, int dim, const XTen
     }

     float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
-    InitTensor(&output, order, dimSize, input.dataType, dr, input.devID, input.mem);
+    InitTensorV2(&output, order, dimSize, input.dataType, dr, input.devID, input.mem);

     /* destroy variables */
     delete[] dimSize;
source/tensor/core/reduce/ReduceVariance.cpp
@@ -111,7 +111,7 @@ void ReduceVariance(const XTensor &input, XTensor &output, int dim, const XTenso
     }

     float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
-    InitTensor(&output, order, dimSize, input.dataType, dr, input.devID, input.mem);
+    InitTensorV2(&output, order, dimSize, input.dataType, dr, input.devID, input.mem);

     /* destroy variables */
     delete[] dimSize;
source/tensor/core/shape/Concatenate.cpp
@@ -208,7 +208,7 @@ void Concatenate(const TensorList & smalls, XTensor & big, int dim)
         }

         float dr = (!tensor->isSparse) ? 1.0F : tensor->denseRatio;
-        InitTensor(&big, order, dimSize, tensor->dataType, dr, tensor->devID, tensor->mem);
+        InitTensorV2(&big, order, dimSize, tensor->dataType, dr, tensor->devID, tensor->mem);
     }
     else {
         for (int i = 0; i < tensor->order; i++)
@@ -223,7 +223,7 @@ void Concatenate(const TensorList & smalls, XTensor & big, int dim)
         dimSize[dim] = catDimSize;

         float dr = (!tensor->isSparse) ? 1.0F : tensor->denseRatio;
-        InitTensor(&big, order, dimSize, tensor->dataType, dr, tensor->devID, tensor->mem);
+        InitTensorV2(&big, order, dimSize, tensor->dataType, dr, tensor->devID, tensor->mem);
     }

     /* destroy variables */
     delete[] dimSize;
source/tensor/core/shape/Merge.cpp
@@ -254,7 +254,7 @@ void Merge(const XTensor &s, XTensor &t, int whereToMerge, int leadingDim)
     }

     float dr = (!s.isSparse) ? 1.0F : s.denseRatio;
-    InitTensor(&t, order, dimSize, s.dataType, dr, s.devID, s.mem);
+    InitTensorV2(&t, order, dimSize, s.dataType, dr, s.devID, s.mem);

     /* destroy variables */
     delete[] dimSize;
source/tensor/core/shape/Reshape.cpp
@@ -54,7 +54,7 @@ XTensor Reshape(XTensor &s, int order, int * dimSize)
 void Reshape(XTensor &s, XTensor &t, int order, int * dimSize)
 {
     if (!t.isInit || !IsSameShaped(t, s)) {
-        InitTensor(&t, &s);
+        InitTensorV2(&t, &s);
     }

     /* call Reshape function */
source/tensor/core/shape/Split.cpp
@@ -243,7 +243,7 @@ void Split(const XTensor &s, XTensor &t, int whereToSplit, int splitNum)
     }

     float dr = (!s.isSparse) ? 1.0F : s.denseRatio;
-    InitTensor(&t, order, dimSize, s.dataType, dr, s.devID, s.mem);
+    InitTensorV2(&t, order, dimSize, s.dataType, dr, s.devID, s.mem);

     /* destroy variables */
     delete[] dimSize;
source/tensor/core/shape/Squeeze.cpp
@@ -132,7 +132,7 @@ XTensor Squeeze(XTensor & source, int leadingDim)
 void Squeeze(XTensor & source, XTensor & target, int leadingDim)
 {
     if (!target.isInit || !IsSameShaped(source, target)) {
-        InitTensor(&target, &source);
+        InitTensorV2(&target, &source);
     }

     /* call _Squeeze function */
source/tensor/core/shape/Unsqueeze.cpp
@@ -183,7 +183,7 @@ void Unsqueeze(const XTensor &a, XTensor &b, int dim, int dSize)
     }

     float dr = (!a.isSparse) ? 1.0F : a.denseRatio;
-    InitTensor(&b, order, dimSize, a.dataType, dr, a.devID, a.mem);
+    InitTensorV2(&b, order, dimSize, a.dataType, dr, a.devID, a.mem);

     /* destroy variables */
     delete[] dimSize;
source/tensor/function/Dropout.cpp
@@ -68,7 +68,7 @@ void _Dropout(const XTensor * x, XTensor * y, unsigned int seed, DTYPE dropProb,
     for (int i = 0; i < unitNum; i++)
         maskArray[i] = RandomBernoulli(dropProb, scaleFactor);

-    XTensor * mask = NewTensor1D(unitNum, x->dataType, x->devID, x->mem);
+    XTensor * mask = NewTensor1DV2(unitNum, x->dataType, x->devID, x->mem);
     mask->SetData(maskArray, unitNum);

     /* call Multiply function for mask */
@@ -113,7 +113,7 @@ void _DropoutBackward(const XTensor * y, const XTensor * x,
     for (int i = 0; i < unitNum; i++)
         maskArray[i] = RandomBernoulli(dropProb, scaleFactor);

-    XTensor * mask = NewTensor1D(unitNum, x->dataType, x->devID, x->mem);
+    XTensor * mask = NewTensor1DV2(unitNum, x->dataType, x->devID, x->mem);
     mask->SetData(maskArray, unitNum);

     /* call MultiplyDim function for mask */
@@ -154,7 +154,7 @@ XTensor Dropout(const XTensor &x, DTYPE dropProb, int leadingDim, int leadingDim
     if (leadingDim < 0 && leadingDim2 < 0){
         XTensor mask;
-        InitTensor(&mask, &x);
+        InitTensorV2(&mask, &x);

         _SetDataRandP(&mask, 0, 1.0F, dropProb, scaleFactor);
@@ -168,7 +168,7 @@ XTensor Dropout(const XTensor &x, DTYPE dropProb, int leadingDim, int leadingDim
             maskArrayInt[i] = rand() % x.unitNum;

         XTensor maskindex;
-        InitTensor1D(&maskindex, unitNum, X_INT, x.devID, x.mem);
+        InitTensor1DV2(&maskindex, unitNum, X_INT, x.devID, x.mem);

         maskindex.SetData(maskArrayInt, unitNum);
@@ -191,7 +191,7 @@ XTensor Dropout(const XTensor &x, DTYPE dropProb, int leadingDim, int leadingDim
             maskArray[i] = RandomBernoulli(dropProb, scaleFactor);

         XTensor mask;
-        InitTensor1D(&mask, unitNum, x.dataType, x.devID, x.mem);
+        InitTensor1DV2(&mask, unitNum, x.dataType, x.devID, x.mem);
         mask.SetData(maskArray, unitNum);

         delete[] maskArray;
@@ -220,7 +220,7 @@ XTensor Dropout(const XTensor &x, DTYPE dropProb, int leadingDim, int leadingDim
         dims[n] = x.GetDim(n);
         dims[m] = x.GetDim(m);

-        InitTensor(&mask, x.order, dims, x.dataType, x.denseRatio, x.devID, x.mem);
+        InitTensorV2(&mask, x.order, dims, x.dataType, x.denseRatio, x.devID, x.mem);
         mask.SetData(maskArray, unitNum);

         delete[] maskArray;
@@ -250,7 +250,7 @@ XTensor DropoutWithoutBroadcast(const XTensor &x, DTYPE dropProb)
         maskArray[i] = RandomBernoulli(dropProb, scaleFactor);

     XTensor mask;
-    InitTensor(&mask, x.order, x.dimSize, x.dataType, x.denseRatio, x.devID, x.mem);
+    InitTensorV2(&mask, x.order, x.dimSize, x.dataType, x.denseRatio, x.devID, x.mem);
     mask.SetData(maskArray, unitNum);

     delete[] maskArray;
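For reference, the mask-building idiom touched by the hunks above, written against the renamed 1-D constructor. This is a minimal sketch assembled from the lines in the diff; unitNum, dropProb, scaleFactor and x stand in for the surrounding function's locals and are not defined here.

// build a Bernoulli mask on the host and wrap it in a 1-D tensor
DTYPE * maskArray = new DTYPE[unitNum];
for (int i = 0; i < unitNum; i++)
    maskArray[i] = RandomBernoulli(dropProb, scaleFactor);

XTensor * mask = NewTensor1DV2(unitNum, x->dataType, x->devID, x->mem);  // was NewTensor1D(...)
mask->SetData(maskArray, unitNum);
/* multiply x by mask (the file calls Multiply / MultiplyDim here), then clean up */
delete[] maskArray;
delete mask;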
source/tensor/function/DropoutWithIndex.cpp
@@ -68,7 +68,7 @@ XTensor DropoutWithIndex(const XTensor &x, XTensor &maskIndex, DTYPE scale)
         dimSize[i] = x.dimSize[i];
     }

-    InitTensor1D(&c, x.unitNum, x.dataType, x.devID, x.mem);
+    InitTensor1DV2(&c, x.unitNum, x.dataType, x.devID, x.mem);

     _SetDataFixedFloat(&c, 1.0F);
source/tensor/function/HardTanH.cpp
@@ -89,7 +89,7 @@ XTensor HardTanH(const XTensor &x)
 void HardTanH(const XTensor &x, XTensor &y)
 {
     if (!y.isInit || !IsSameShaped(y, x)) {
-        InitTensor(&y, &x);
+        InitTensorV2(&y, &x);
     }

     /* call _HardTanH function */
source/tensor/function/Identity.cpp
@@ -65,7 +65,7 @@ XTensor Identity(const XTensor &x)
 void Identity(const XTensor &x, XTensor &y)
 {
     if (!y.isInit || !IsSameShaped(y, x)) {
-        InitTensor(&y, &x);
+        InitTensorV2(&y, &x);
     }

     /* call _Identity function */
source/tensor/function/LogSoftmax.cpp
@@ -79,8 +79,8 @@ void _LogSoftmax(const XTensor * x, XTensor * y, int leadDim)
     blockSize = stride * dimensionSize;
     blockNum = y->unitNum / blockSize;

-    max = NewTensorBuf(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem);
-    sum = NewTensorBuf(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem);
+    max = NewTensorBufV2(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem);
+    sum = NewTensorBufV2(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem);

     _ReduceMax(x, max, leadDim);
     _ReduceSum(x, sum, leadDim, max, 1.0F, true);
@@ -89,16 +89,16 @@ void _LogSoftmax(const XTensor * x, XTensor * y, int leadDim)
         if (leadDim == x->order - 1){
             blockSize = y->unitNum;
             blockNum = 1;
-            blockx = NewTensor2D(blockSize/dimensionSize, -dimensionSize, x->dataType, x->devID, mem);
-            blocky = NewTensor2D(blockSize/dimensionSize, -dimensionSize, x->dataType, x->devID, mem);
-            blockMax = NewTensor2D(blockSize/dimensionSize, -1, x->dataType, x->devID, mem);
-            blockSum = NewTensor2D(blockSize/dimensionSize, -1, x->dataType, x->devID, mem);
+            blockx = NewTensor2DV2(blockSize/dimensionSize, -dimensionSize, x->dataType, x->devID, mem);
+            blocky = NewTensor2DV2(blockSize/dimensionSize, -dimensionSize, x->dataType, x->devID, mem);
+            blockMax = NewTensor2DV2(blockSize/dimensionSize, -1, x->dataType, x->devID, mem);
+            blockSum = NewTensor2DV2(blockSize/dimensionSize, -1, x->dataType, x->devID, mem);
         }
         else {
-            blockx = NewTensor2D(-stride, dimensionSize, x->dataType, x->devID, mem);
-            blocky = NewTensor2D(-stride, dimensionSize, x->dataType, x->devID, mem);
-            blockMax = NewTensor2D(-stride, 1, x->dataType, x->devID, mem);
-            blockSum = NewTensor2D(-stride, 1, x->dataType, x->devID, mem);
+            blockx = NewTensor2DV2(-stride, dimensionSize, x->dataType, x->devID, mem);
+            blocky = NewTensor2DV2(-stride, dimensionSize, x->dataType, x->devID, mem);
+            blockMax = NewTensor2DV2(-stride, 1, x->dataType, x->devID, mem);
+            blockSum = NewTensor2DV2(-stride, 1, x->dataType, x->devID, mem);
         }
     }
@@ -211,7 +211,7 @@ void LogSoftmax(const XTensor &x, XTensor &y, int leadDim)
         ld = x.order - 1;

     if (!y.isInit || !IsSameShaped(y, x)) {
-        InitTensor(&y, &x);
+        InitTensorV2(&y, &x);
     }

     /* call _LogSoftmax function */
source/tensor/function/Loss.cu
@@ -74,7 +74,7 @@ DTYPE _CudaLossCompute(XTensor * gold, XTensor * y, LOSS_FUNCTION_NAME LFName,
     where gold_i is the gold standard and output_i is the model prediction
     */
     if(LFName == SQUAREDERROR){
-        XTensor * diff = NewTensor(gold->order, gold->dimSize, gold->dataType, gold->denseRatio, gold->devID, gold->mem);
+        XTensor * diff = NewTensorV2(gold->order, gold->dimSize, gold->dataType, gold->denseRatio, gold->devID, gold->mem);
         _Sum(gold, y, diff, -1.0F);
         _PowerMe(diff, 2.0F);
         _ScaleAndShiftMe(diff, 0.5F, 0.0F);
@@ -84,7 +84,7 @@ DTYPE _CudaLossCompute(XTensor * gold, XTensor * y, LOSS_FUNCTION_NAME LFName,
             int diffOrder = diff->order - 1;
             int * diffDimSize = new int[diffOrder];
             memcpy(diffDimSize, diff->dimSize + 1, diffOrder * sizeof(int));
-            XTensor * diffNew = NewTensor(diffOrder, diffDimSize, X_FLOAT, 1.0F, diff->devID, diff->mem);
+            XTensor * diffNew = NewTensorV2(diffOrder, diffDimSize, X_FLOAT, 1.0F, diff->devID, diff->mem);
             int reducePlace = diff->dimSize[0] == 1 ? 1 : 0;
             _ReduceSum(diff, diffNew, reducePlace);
             if (diffNew->order == 1) {
@@ -107,7 +107,7 @@ DTYPE _CudaLossCompute(XTensor * gold, XTensor * y, LOSS_FUNCTION_NAME LFName,
     where gold and output are distributions
     */
     if(LFName == CROSSENTROPY){
-        XTensor * diff = NewTensor(y->order, y->dimSize, y->dataType, y->denseRatio, y->devID, y->mem);
+        XTensor * diff = NewTensorV2(y->order, y->dimSize, y->dataType, y->denseRatio, y->devID, y->mem);
         _CopyValues(y, diff);
         _LogMe(diff);
         _Multiply(gold, diff, diff);
@@ -118,7 +118,7 @@ DTYPE _CudaLossCompute(XTensor * gold, XTensor * y, LOSS_FUNCTION_NAME LFName,
             int diffOrder = diff->order - 1;
             int * diffDimSize = new int[diffOrder];
             memcpy(diffDimSize, diff->dimSize + 1, diffOrder * sizeof(int));
-            XTensor * diffNew = NewTensor(diffOrder, diffDimSize, X_FLOAT, 1.0F, diff->devID, diff->mem);
+            XTensor * diffNew = NewTensorV2(diffOrder, diffDimSize, X_FLOAT, 1.0F, diff->devID, diff->mem);
             int reducePlace = diff->dimSize[0] == 1 ? 1 : 0;
             _ReduceSum(diff, diffNew, reducePlace);
             if (diffNew->order == 1) {
@@ -142,8 +142,8 @@ DTYPE _CudaLossCompute(XTensor * gold, XTensor * y, LOSS_FUNCTION_NAME LFName,
     e_i = 0 otherwise
     */
     if(LFName == ONEHOTERROR){
-        XTensor * diff = NewTensor(gold->order, gold->dimSize, gold->dataType, gold->denseRatio, gold->devID, gold->mem);
-        XTensor * yOnehot = NewTensor(y->order, y->dimSize, y->dataType, y->denseRatio, y->devID, y->mem);
+        XTensor * diff = NewTensorV2(gold->order, gold->dimSize, gold->dataType, gold->denseRatio, gold->devID, gold->mem);
+        XTensor * yOnehot = NewTensorV2(y->order, y->dimSize, y->dataType, y->denseRatio, y->devID, y->mem);
         _CopyValues(y, yOnehot);
         _Multiply(gold, y, yOnehot);
         _Sum(gold, yOnehot, diff, -1.0F);
@@ -155,7 +155,7 @@ DTYPE _CudaLossCompute(XTensor * gold, XTensor * y, LOSS_FUNCTION_NAME LFName,
             int diffOrder = diff->order - 1;
             int * diffDimSize = new int[diffOrder];
             memcpy(diffDimSize, diff->dimSize + 1, diffOrder * sizeof(int));
-            XTensor * diffNew = NewTensor(diffOrder, diffDimSize, X_FLOAT, 1.0F, diff->devID, diff->mem);
+            XTensor * diffNew = NewTensorV2(diffOrder, diffDimSize, X_FLOAT, 1.0F, diff->devID, diff->mem);
            int reducePlace = diff->dimSize[0] == 1 ? 1 : 0;
            _ReduceSum(diff, diffNew, reducePlace);
            if (diffNew->order == 1) {
source/tensor/function/Rectify.cpp
@@ -81,7 +81,7 @@ XTensor Rectify(const XTensor &x)
 void Rectify(const XTensor &x, XTensor &y)
 {
     if (!y.isInit || !IsSameShaped(y, x)) {
-        InitTensor(&y, &x);
+        InitTensorV2(&y, &x);
     }

     /* call _Rectify function */
source/tensor/function/Sigmoid.cpp
@@ -84,7 +84,7 @@ XTensor Sigmoid(const XTensor &x)
 void Sigmoid(const XTensor &x, XTensor &y)
 {
     if (!y.isInit || !IsSameShaped(y, x)) {
-        InitTensor(&y, &x);
+        InitTensorV2(&y, &x);
     }

     /* call _Sigmoid function */
source/tensor/function/Softmax.cpp
@@ -54,8 +54,8 @@ void _Softmax(const XTensor * x, XTensor * y, int leadDim)
     XTensor * max = NULL;
     XTensor * sum = NULL;

-    max = NewTensorBuf(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem);
-    sum = NewTensorBuf(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem);
+    max = NewTensorBufV2(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem);
+    sum = NewTensorBufV2(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem);

     _ReduceMax(x, max, leadDim);
     _ReduceSum(x, sum, leadDim, max, 1.0F, true);
@@ -157,7 +157,7 @@ void Softmax(const XTensor &x, XTensor &y, int leadDim)
         ld = x.order - 1;

     if (!y.isInit || !IsSameShaped(y, x)) {
-        InitTensor(&y, &x);
+        InitTensorV2(&y, &x);
     }

     /* call _Softmax function */
source/tensor/function/Softmax.cu
@@ -381,7 +381,7 @@ void _CudaSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
         XTensor * ytmp = NewTensor(y);

         /* make a matrix to keep \beta */
-        XTensor * beta = NewTensor(y->order - 1, dimSize, y->dataType, y->denseRatio, y->devID, y->mem);
+        XTensor * beta = NewTensorV2(y->order - 1, dimSize, y->dataType, y->denseRatio, y->devID, y->mem);

         /* \beta = \sum_i (dE/dy_i * y_i) */
         _Multiply(dedy, y, ytmp, 0, 0);
source/tensor/loss/CrossEntropy.cpp
@@ -353,7 +353,7 @@ DTYPE _CrossEntropy(const XTensor * output, const XTensor * gold,
         dimSize[i - 1] = output->dimSize[i];
     }

-    XTensor * lossBuf = NewTensorBuf(output->order - 1, dimSize, output->dataType, output->denseRatio,
+    XTensor * lossBuf = NewTensorBufV2(output->order - 1, dimSize, output->dataType, output->denseRatio,
                                      output->devID, output->mem);

     _CrossEntropy(output, gold, lossBuf, weight, padding, leadingDim);
@@ -366,7 +366,7 @@ DTYPE _CrossEntropy(const XTensor * output, const XTensor * gold,
         nonZeroNum = lossBuf->unitNum;
     }
     else {
-        XTensor * tmp = NewTensorBuf(padding, padding->devID, padding->mem);
+        XTensor * tmp = NewTensorBufV2(padding, padding->devID, padding->mem);
         _IsNonZero(padding, tmp);
         nonZeroNum = (int)_ReduceSumAll(tmp);
         DelTensorBuf(tmp);
source/tensor/loss/CrossEntropy.cu
@@ -57,8 +57,8 @@ void _CudaCrossEntropyFast(const XTensor * output, const XTensor * gold,
 {
     int n = leadingDim < 0 ? output->order - 1 : leadingDim;

-    XTensor * interBuf1 = NewTensorBuf(output, output->devID, output->mem);
-    XTensor * interBuf2 = NewTensorBuf(output, output->devID, output->mem);
+    XTensor * interBuf1 = NewTensorBufV2(output, output->devID, output->mem);
+    XTensor * interBuf2 = NewTensorBufV2(output, output->devID, output->mem);

     _Log(output, interBuf1);
     _Multiply(gold, interBuf1, interBuf2);
@@ -118,7 +118,7 @@ DTYPE _CudaCrossEntropyFast(const XTensor * output, const XTensor * gold,
         dimSize[i - 1] = output->dimSize[i];
     }

-    XTensor * lossBuf = NewTensorBuf(output->order - 1, dimSize, output->dataType, output->denseRatio,
+    XTensor * lossBuf = NewTensorBufV2(output->order - 1, dimSize, output->dataType, output->denseRatio,
                                      output->devID, output->mem);

     _CudaCrossEntropyFast(output, gold, lossBuf, weight, padding, leadingDim);
@@ -131,7 +131,7 @@ DTYPE _CudaCrossEntropyFast(const XTensor * output, const XTensor * gold,
         nonZeroNum = lossBuf->unitNum;
     }
     else {
-        XTensor * tmp = NewTensorBuf(padding, padding->devID, padding->mem);
+        XTensor * tmp = NewTensorBufV2(padding, padding->devID, padding->mem);
         _IsNonZero(padding, tmp);
         nonZeroNum = (int)_ReduceSumAll(tmp);
         DelTensorBuf(tmp);
source/tensor/test/TAbsolute.cpp
@@ -52,9 +52,9 @@ bool TestAbsolute1()
     bool cpuTest = true;

     /* create tensors */
-    XTensor * a = NewTensor(order, dimSize);
-    XTensor * b = NewTensor(order, dimSize);
-    XTensor * aMe = NewTensor(order, dimSize);
+    XTensor * a = NewTensorV2(order, dimSize);
+    XTensor * b = NewTensorV2(order, dimSize);
+    XTensor * aMe = NewTensorV2(order, dimSize);
     XTensor bUser;

     /* initialize variables */
@@ -74,9 +74,9 @@ bool TestAbsolute1()
     bool gpuTest = true;

     /* create tensor */
-    XTensor * aGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
-    XTensor * bGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
-    XTensor * aMeGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * bGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aMeGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor bUserGPU;

    /* Initialize variables */
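The test hunks exercise the two NewTensorV2 overloads visible in this commit: a short form that takes only the order and dimension sizes, and a longer form that also passes the data type, dense ratio and device id. A minimal sketch under the assumption that the short form defaults to a dense float tensor on the CPU (that default is not stated in the patch itself):

int order = 2;
int dimSize[2] = {3, 2};
XTensor * cpuT = NewTensorV2(order, dimSize);                    // assumed defaults: X_FLOAT, CPU
XTensor * gpuT = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);  // data type, dense ratio, device 0
/* ... run the op under test on both tensors ... */
delete cpuT;
delete gpuT;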
source/tensor/test/TClip.cpp
@@ -53,9 +53,9 @@ bool TestClip1()
     bool cpuTest = true;

     /* create tensors */
-    XTensor * a = NewTensor(aOrder, aDimSize);
-    XTensor * b = NewTensor(aOrder, aDimSize);
-    XTensor * aMe = NewTensor(aOrder, aDimSize);
+    XTensor * a = NewTensorV2(aOrder, aDimSize);
+    XTensor * b = NewTensorV2(aOrder, aDimSize);
+    XTensor * aMe = NewTensorV2(aOrder, aDimSize);
     XTensor bUser;

     /* initialize variables */
@@ -77,9 +77,9 @@ bool TestClip1()
     bool gpuTest = true;

     /* create tensor */
-    XTensor * aGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * bGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * aMeGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * bGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aMeGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
     XTensor bUserGPU;

     /* Initialize variables */
source/tensor/test/TCompare.cpp
@@ -53,9 +53,9 @@ bool TestCompare1()
     bool cpuTest = true;

     /* create tensors */
-    XTensor * a = NewTensor(aOrder, aDimSize);
-    XTensor * b = NewTensor(aOrder, aDimSize);
-    XTensor * aMe = NewTensor(aOrder, aDimSize);
+    XTensor * a = NewTensorV2(aOrder, aDimSize);
+    XTensor * b = NewTensorV2(aOrder, aDimSize);
+    XTensor * aMe = NewTensorV2(aOrder, aDimSize);
     XTensor bUser;

     /* initialize variables */
@@ -77,9 +77,9 @@ bool TestCompare1()
     bool gpuTest = true;

     /* create tensor */
-    XTensor * aGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * bGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * aMeGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * bGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aMeGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
     XTensor bUserGPU;

     /* Initialize variables */
source/tensor/test/TConcatenate.cpp
@@ -74,9 +74,9 @@ bool TestConcatenate1()
     bool cpuTest = true;

     /* create tensors */
-    XTensor * s1 = NewTensor(sOrder1, sDimSize1);
-    XTensor * s2 = NewTensor(sOrder2, sDimSize2);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
+    XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
     XTensor tUser;

     /* initialize variables */
@@ -100,9 +100,9 @@ bool TestConcatenate1()
     bool gpuTest = true;

     /* create tensor */
-    XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
-    XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU1 = NewTensorV2(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU2 = NewTensorV2(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU;

     /* Initialize variables */
@@ -203,9 +203,9 @@ bool TestConcatenate2()
     bool cpuTest = true;

     /* create tensors */
-    XTensor * s1 = NewTensor(sOrder1, sDimSize1);
-    XTensor * s2 = NewTensor(sOrder2, sDimSize2);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
+    XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
     XTensor tUser;

     /* initialize variables */
@@ -229,9 +229,9 @@ bool TestConcatenate2()
     bool gpuTest = true;

     /* create tensor */
-    XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
-    XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU1 = NewTensorV2(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU2 = NewTensorV2(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU;

     /* Initialize variables */
@@ -330,9 +330,9 @@ bool TestConcatenate3()
     bool cpuTest = true;

     /* create tensors */
-    XTensor * s1 = NewTensor(sOrder1, sDimSize1);
-    XTensor * s2 = NewTensor(sOrder2, sDimSize2);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
+    XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
     XTensor tUser;

     /* initialize variables */
@@ -356,9 +356,9 @@ bool TestConcatenate3()
     bool gpuTest = true;

     /* create tensor */
-    XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
-    XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU1 = NewTensorV2(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU2 = NewTensorV2(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU;

     /* Initialize variables */
@@ -454,9 +454,9 @@ bool TestConcatenate4()
     bool cpuTest = true;

     /* create tensors */
-    XTensor * s1 = NewTensor(sOrder1, sDimSize1);
-    XTensor * s2 = NewTensor(sOrder2, sDimSize2);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
+    XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
     XTensor tUser;

     /* initialize variables */
@@ -476,9 +476,9 @@ bool TestConcatenate4()
     bool gpuTest = true;

     /* create tensor */
-    XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
-    XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU1 = NewTensorV2(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU2 = NewTensorV2(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU;

     /* Initialize variables */
...
...
source/tensor/test/TConcatenateSolely.cpp
@@ -75,9 +75,9 @@ bool TestConcatenateSolely1()
  bool cpuTest = true;
  /* create tensors */
- XTensor * s1 = NewTensor(sOrder1, sDimSize1);
+ XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
- XTensor * s2 = NewTensor(sOrder2, sDimSize2);
+ XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
- XTensor * t = NewTensor(tOrder, tDimSize);
+ XTensor * t = NewTensorV2(tOrder, tDimSize);
  /* initialize variables */
  s1->SetData(sData1, sUnitNum1);
@@ -99,9 +99,9 @@ bool TestConcatenateSolely1()
  bool gpuTest = true;
  /* create tensor */
- XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU1 = NewTensorV2(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
- XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU2 = NewTensorV2(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
- XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
  /* Initialize variables */
  sGPU1->SetData(sData1, sUnitNum1);
@@ -200,9 +200,9 @@ bool TestConcatenateSolely2()
  bool cpuTest = true;
  /* create tensors */
- XTensor * s1 = NewTensor(sOrder1, sDimSize1);
+ XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
- XTensor * s2 = NewTensor(sOrder2, sDimSize2);
+ XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
- XTensor * t = NewTensor(tOrder, tDimSize);
+ XTensor * t = NewTensorV2(tOrder, tDimSize);
  /* initialize variables */
  s1->SetData(sData1, sUnitNum1);
@@ -224,9 +224,9 @@ bool TestConcatenateSolely2()
  bool gpuTest = true;
  /* create tensor */
- XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU1 = NewTensorV2(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
- XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU2 = NewTensorV2(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
- XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
  /* Initialize variables */
  sGPU1->SetData(sData1, sUnitNum1);
@@ -323,9 +323,9 @@ bool TestConcatenateSolely3()
  bool cpuTest = true;
  /* create tensors */
- XTensor * s1 = NewTensor(sOrder1, sDimSize1);
+ XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
- XTensor * s2 = NewTensor(sOrder2, sDimSize2);
+ XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
- XTensor * t = NewTensor(tOrder, tDimSize);
+ XTensor * t = NewTensorV2(tOrder, tDimSize);
  /* initialize variables */
  s1->SetData(sData1, sUnitNum1);
@@ -347,9 +347,9 @@ bool TestConcatenateSolely3()
  bool gpuTest = true;
  /* create tensor */
- XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU1 = NewTensorV2(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
- XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU2 = NewTensorV2(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
- XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
  /* Initialize variables */
  sGPU1->SetData(sData1, sUnitNum1);
source/tensor/test/TConvertDataType.cpp
@@ -53,8 +53,8 @@ bool TestConvertDataType1()
  bool cpuTest = true;
  /* create tensors */
- XTensor * a = NewTensor(aOrder, aDimSize);
+ XTensor * a = NewTensorV2(aOrder, aDimSize);
- XTensor * b = NewTensor(aOrder, aDimSize, X_INT);
+ XTensor * b = NewTensorV2(aOrder, aDimSize, X_INT);
  /* initialize variables */
  a->SetData(aData, aUnitNum);
@@ -71,8 +71,8 @@ bool TestConvertDataType1()
  bool gpuTest = true;
  /* create tensor */
- XTensor * aGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * aGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
- XTensor * bGPU = NewTensor(aOrder, aDimSize, X_INT, 1.0F, 0);
+ XTensor * bGPU = NewTensorV2(aOrder, aDimSize, X_INT, 1.0F, 0);
  /* Initialize variables */
  aGPU->SetData(aData, aUnitNum);
@@ -128,8 +128,8 @@ bool TestConvertDataType2()
  bool cpuTest = true;
  /* create tensors */
- XTensor * a = NewTensor(aOrder, aDimSize, X_INT);
+ XTensor * a = NewTensorV2(aOrder, aDimSize, X_INT);
- XTensor * b = NewTensor(aOrder, aDimSize);
+ XTensor * b = NewTensorV2(aOrder, aDimSize);
  /* initialize variables */
  a->SetData(aData, aUnitNum);
@@ -146,8 +146,8 @@ bool TestConvertDataType2()
  bool gpuTest = true;
  /* create tensor */
- XTensor * aGPU = NewTensor(aOrder, aDimSize, X_INT, 1.0F, 0);
+ XTensor * aGPU = NewTensorV2(aOrder, aDimSize, X_INT, 1.0F, 0);
- XTensor * bGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * bGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
  /* Initialize variables */
  aGPU->SetData(aData, aUnitNum);
@@ -226,9 +226,9 @@ bool TestConvertDataType3()
  bool cpuTest = true;
  /* create tensors */
- XTensor * a = NewTensor(order, dimSize1, X_FLOAT, 1.0F, -1);
+ XTensor * a = NewTensorV2(order, dimSize1, X_FLOAT, 1.0F, -1);
- XTensor * b = NewTensor(order, dimSize1, X_FLOAT16, 1.0F, -1);
+ XTensor * b = NewTensorV2(order, dimSize1, X_FLOAT16, 1.0F, -1);
- XTensor * c = NewTensor(order, dimSize1, X_FLOAT, 1.0F, -1);
+ XTensor * c = NewTensorV2(order, dimSize1, X_FLOAT, 1.0F, -1);
  /* initialize variables */
  a->SetData(data1, unitNum1);
@@ -245,12 +245,12 @@ bool TestConvertDataType3()
  bool gpuTest = true;
  /* create tensor */
- XTensor * aGPU = NewTensor(order, dimSize1, X_FLOAT, 1.0F, 0);
+ XTensor * aGPU = NewTensorV2(order, dimSize1, X_FLOAT, 1.0F, 0);
- XTensor * bGPU = NewTensor(order, dimSize2, X_FLOAT, 1.0F, 0);
+ XTensor * bGPU = NewTensorV2(order, dimSize2, X_FLOAT, 1.0F, 0);
- XTensor * cGPU = NewTensor(order, dimSize1, X_FLOAT16, 1.0F, 0);
+ XTensor * cGPU = NewTensorV2(order, dimSize1, X_FLOAT16, 1.0F, 0);
- XTensor * dGPU = NewTensor(order, dimSize2, X_FLOAT16, 1.0F, 0);
+ XTensor * dGPU = NewTensorV2(order, dimSize2, X_FLOAT16, 1.0F, 0);
- XTensor * eGPU = NewTensor(order, dimSize3, X_FLOAT16, 1.0F, 0);
+ XTensor * eGPU = NewTensorV2(order, dimSize3, X_FLOAT16, 1.0F, 0);
- XTensor * fGPU = NewTensor(order, dimSize3, X_FLOAT, 1.0F, 0);
+ XTensor * fGPU = NewTensorV2(order, dimSize3, X_FLOAT, 1.0F, 0);
  /* Initialize variables */
  aGPU->SetData(data1, unitNum1);
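As the TestConvertDataType hunks above show, the same V2 constructor also takes an explicit data type, so integer and half-precision tensors are created exactly like float ones. A short hedged sketch; the assumption that a device id of -1 selects the CPU is inferred from the CPU-side hunk above, not stated anywhere in the diff:

    int order = 2;
    int dimSize[2] = {2, 3};

    XTensor * intTensor  = NewTensorV2(order, dimSize, X_INT);               /* int tensor, default device */
    XTensor * halfTensor = NewTensorV2(order, dimSize, X_FLOAT16, 1.0F, -1); /* fp16 tensor, device id -1 */
    XTensor * gpuTensor  = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);    /* fp32 tensor on device 0 */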
source/tensor/test/TCopyIndexed.cpp
@@ -85,11 +85,11 @@ bool TestCopyIndexed1()
  bool cpuTest = true;
  /* create tensors */
- XTensor * s = NewTensor(sOrder, sDimSize);
+ XTensor * s = NewTensorV2(sOrder, sDimSize);
- XTensor * t1 = NewTensor(tOrder, tDimSize);
+ XTensor * t1 = NewTensorV2(tOrder, tDimSize);
- XTensor * t2 = NewTensor(tOrder, tDimSize);
+ XTensor * t2 = NewTensorV2(tOrder, tDimSize);
- XTensor * sIndex = NewTensor(indexOrder, indexDimSize, X_INT);
+ XTensor * sIndex = NewTensorV2(indexOrder, indexDimSize, X_INT);
- XTensor * tIndex = NewTensor(indexOrder, indexDimSize, X_INT);
+ XTensor * tIndex = NewTensorV2(indexOrder, indexDimSize, X_INT);
  XTensor tUser;
  /* initialize variables */
@@ -114,11 +114,11 @@ bool TestCopyIndexed1()
  bool gpuTest = true;
  /* create tensors */
- XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * tGPU1 = NewTensor(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU1 = NewTensorV2(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
- XTensor * tGPU2 = NewTensor(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU2 = NewTensorV2(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
- XTensor * sIndexGPU = NewTensor(indexOrder, indexDimSize, X_INT, 1.0F, 0);
+ XTensor * sIndexGPU = NewTensorV2(indexOrder, indexDimSize, X_INT, 1.0F, 0);
- XTensor * tIndexGPU = NewTensor(indexOrder, indexDimSize, X_INT, 1.0F, 0);
+ XTensor * tIndexGPU = NewTensorV2(indexOrder, indexDimSize, X_INT, 1.0F, 0);
  XTensor tUserGPU;
  /* initialize variables */
@@ -230,11 +230,11 @@ bool TestCopyIndexed2()
  bool cpuTest = true;
  /* create tensors */
- XTensor * s = NewTensor(sOrder, sDimSize);
+ XTensor * s = NewTensorV2(sOrder, sDimSize);
- XTensor * t1 = NewTensor(tOrder, tDimSize);
+ XTensor * t1 = NewTensorV2(tOrder, tDimSize);
- XTensor * t2 = NewTensor(tOrder, tDimSize);
+ XTensor * t2 = NewTensorV2(tOrder, tDimSize);
- XTensor * sIndex = NewTensor(indexOrder, indexDimSize, X_INT);
+ XTensor * sIndex = NewTensorV2(indexOrder, indexDimSize, X_INT);
- XTensor * tIndex = NewTensor(indexOrder, indexDimSize, X_INT);
+ XTensor * tIndex = NewTensorV2(indexOrder, indexDimSize, X_INT);
  XTensor tUser;
  /* initialize variables */
@@ -259,11 +259,11 @@ bool TestCopyIndexed2()
  bool gpuTest = true;
  /* create tensors */
- XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * tGPU1 = NewTensor(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU1 = NewTensorV2(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
- XTensor * tGPU2 = NewTensor(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU2 = NewTensorV2(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
- XTensor * sIndexGPU = NewTensor(indexOrder, indexDimSize, X_INT, 1.0F, 0);
+ XTensor * sIndexGPU = NewTensorV2(indexOrder, indexDimSize, X_INT, 1.0F, 0);
- XTensor * tIndexGPU = NewTensor(indexOrder, indexDimSize, X_INT, 1.0F, 0);
+ XTensor * tIndexGPU = NewTensorV2(indexOrder, indexDimSize, X_INT, 1.0F, 0);
  XTensor tUserGPU;
  /* initialize variables */
@@ -375,11 +375,11 @@ bool TestCopyIndexed3()
  bool cpuTest = true;
  /* create tensors */
- XTensor * s = NewTensor(sOrder, sDimSize);
+ XTensor * s = NewTensorV2(sOrder, sDimSize);
- XTensor * t1 = NewTensor(tOrder, tDimSize);
+ XTensor * t1 = NewTensorV2(tOrder, tDimSize);
- XTensor * t2 = NewTensor(tOrder, tDimSize);
+ XTensor * t2 = NewTensorV2(tOrder, tDimSize);
- XTensor * sIndex = NewTensor(indexOrder, indexDimSize, X_INT);
+ XTensor * sIndex = NewTensorV2(indexOrder, indexDimSize, X_INT);
- XTensor * tIndex = NewTensor(indexOrder, indexDimSize, X_INT);
+ XTensor * tIndex = NewTensorV2(indexOrder, indexDimSize, X_INT);
  XTensor tUser;
  /* initialize variables */
@@ -404,11 +404,11 @@ bool TestCopyIndexed3()
  bool gpuTest = true;
  /* create tensors */
- XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * tGPU1 = NewTensor(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU1 = NewTensorV2(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
- XTensor * tGPU2 = NewTensor(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU2 = NewTensorV2(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
- XTensor * sIndexGPU = NewTensor(indexOrder, indexDimSize, X_INT, 1.0F, 0);
+ XTensor * sIndexGPU = NewTensorV2(indexOrder, indexDimSize, X_INT, 1.0F, 0);
- XTensor * tIndexGPU = NewTensor(indexOrder, indexDimSize, X_INT, 1.0F, 0);
+ XTensor * tIndexGPU = NewTensorV2(indexOrder, indexDimSize, X_INT, 1.0F, 0);
  XTensor tUserGPU;
  /* initialize variables */
@@ -520,11 +520,11 @@ bool TestCopyIndexed4()
  bool cpuTest = true;
  /* create tensors */
- XTensor * s = NewTensor(sOrder, sDimSize);
+ XTensor * s = NewTensorV2(sOrder, sDimSize);
- XTensor * t1 = NewTensor(tOrder, tDimSize);
+ XTensor * t1 = NewTensorV2(tOrder, tDimSize);
- XTensor * t2 = NewTensor(tOrder, tDimSize);
+ XTensor * t2 = NewTensorV2(tOrder, tDimSize);
- XTensor * sIndex = NewTensor(indexOrder, indexDimSize, X_INT);
+ XTensor * sIndex = NewTensorV2(indexOrder, indexDimSize, X_INT);
- XTensor * tIndex = NewTensor(indexOrder, indexDimSize, X_INT);
+ XTensor * tIndex = NewTensorV2(indexOrder, indexDimSize, X_INT);
  XTensor tUser;
  /* initialize variables */
@@ -549,11 +549,11 @@ bool TestCopyIndexed4()
  bool gpuTest = true;
  /* create tensors */
- XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * tGPU1 = NewTensor(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU1 = NewTensorV2(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
- XTensor * tGPU2 = NewTensor(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU2 = NewTensorV2(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
- XTensor * sIndexGPU = NewTensor(indexOrder, indexDimSize, X_INT, 1.0F, 0);
+ XTensor * sIndexGPU = NewTensorV2(indexOrder, indexDimSize, X_INT, 1.0F, 0);
- XTensor * tIndexGPU = NewTensor(indexOrder, indexDimSize, X_INT, 1.0F, 0);
+ XTensor * tIndexGPU = NewTensorV2(indexOrder, indexDimSize, X_INT, 1.0F, 0);
  XTensor tUserGPU;
  /* initialize variables */
@@ -665,11 +665,11 @@ bool TestCopyIndexed5()
  bool cpuTest = true;
  /* create tensors */
- XTensor * s = NewTensor(sOrder, sDimSize);
+ XTensor * s = NewTensorV2(sOrder, sDimSize);
- XTensor * t1 = NewTensor(tOrder, tDimSize);
+ XTensor * t1 = NewTensorV2(tOrder, tDimSize);
- XTensor * t2 = NewTensor(tOrder, tDimSize);
+ XTensor * t2 = NewTensorV2(tOrder, tDimSize);
- XTensor * sIndex = NewTensor(indexOrder, indexDimSize, X_INT);
+ XTensor * sIndex = NewTensorV2(indexOrder, indexDimSize, X_INT);
- XTensor * tIndex = NewTensor(indexOrder, indexDimSize, X_INT);
+ XTensor * tIndex = NewTensorV2(indexOrder, indexDimSize, X_INT);
  XTensor tUser;
  /* initialize variables */
@@ -694,11 +694,11 @@ bool TestCopyIndexed5()
  bool gpuTest = true;
  /* create tensors */
- XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * tGPU1 = NewTensor(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU1 = NewTensorV2(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
- XTensor * tGPU2 = NewTensor(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU2 = NewTensorV2(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
- XTensor * sIndexGPU = NewTensor(indexOrder, indexDimSize, X_INT, 1.0F, 0);
+ XTensor * sIndexGPU = NewTensorV2(indexOrder, indexDimSize, X_INT, 1.0F, 0);
- XTensor * tIndexGPU = NewTensor(indexOrder, indexDimSize, X_INT, 1.0F, 0);
+ XTensor * tIndexGPU = NewTensorV2(indexOrder, indexDimSize, X_INT, 1.0F, 0);
  XTensor tUserGPU;
  /* initialize variables */
source/tensor/test/TCopyValues.cpp
@@ -45,8 +45,8 @@ bool TestCopyValues1()
  bool cpuTest = true;
  /* create tensors */
- XTensor * s = NewTensor(sOrder, sDimSize);
+ XTensor * s = NewTensorV2(sOrder, sDimSize);
- XTensor * t = NewTensor(sOrder, sDimSize);
+ XTensor * t = NewTensorV2(sOrder, sDimSize);
  XTensor tUser;
  /* initialize variables */
@@ -65,8 +65,8 @@ bool TestCopyValues1()
  bool gpuTest = true;
  /* create tensors */
- XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * tGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
  XTensor tUserGPU;
  /* initialize variables */
source/tensor/test/TCos.cpp
@@ -52,9 +52,9 @@ bool TestCos1()
  bool cpuTest = true;
  /* create tensors */
- XTensor * a = NewTensor(order, dimSize);
+ XTensor * a = NewTensorV2(order, dimSize);
- XTensor * b = NewTensor(order, dimSize);
+ XTensor * b = NewTensorV2(order, dimSize);
- XTensor * aMe = NewTensor(order, dimSize);
+ XTensor * aMe = NewTensorV2(order, dimSize);
  XTensor bUser;
  /* initialize variables */
@@ -74,9 +74,9 @@ bool TestCos1()
  bool gpuTest = true;
  /* create tensor */
- XTensor * aGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * aGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * bGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * bGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * aMeGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * aMeGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
  XTensor bUserGPU;
  /* Initialize variables */
source/tensor/test/TCrossEntropy.cpp
@@ -54,9 +54,9 @@ bool TestCrossEntropy1()
  bool cpuTest = true;
  /* create tensors */
- XTensor * output = NewTensor(order, dimSize);
+ XTensor * output = NewTensorV2(order, dimSize);
- XTensor * gold = NewTensor(order, dimSize);
+ XTensor * gold = NewTensorV2(order, dimSize);
- XTensor * loss = NewTensor1D(1);
+ XTensor * loss = NewTensor1DV2(1);
  /* initialize variables */
  output->SetData(outputData, unitNum);
@@ -76,9 +76,9 @@ bool TestCrossEntropy1()
  bool gpuTest = true;
  /* create tensor */
- XTensor * outputGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * outputGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * goldGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * goldGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * lossGPU = NewTensor1D(1, X_FLOAT, 0);
+ XTensor * lossGPU = NewTensor1DV2(1, X_FLOAT, 0);
  /* Initialize variables */
  outputGPU->SetData(outputData, unitNum);
@@ -151,8 +151,8 @@ bool TestCrossEntropy2()
  bool cpuTest = true;
  /* create tensors */
- XTensor * output = NewTensor(order, dimSize);
+ XTensor * output = NewTensorV2(order, dimSize);
- XTensor * gold = NewTensor(order, dimSize);
+ XTensor * gold = NewTensorV2(order, dimSize);
  /* initialize variables */
  output->SetData(outputData, unitNum);
@@ -179,8 +179,8 @@ bool TestCrossEntropy2()
  bool gpuTest = true;
  /* create tensor */
- XTensor * outputGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * outputGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * goldGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * goldGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
  /* Initialize variables */
  outputGPU->SetData(outputData, unitNum);
@@ -259,10 +259,10 @@ bool TestCrossEntropy3()
  bool cpuTest = true;
  /* create tensors */
- XTensor * output = NewTensor(order, dimSize);
+ XTensor * output = NewTensorV2(order, dimSize);
- XTensor * gold = NewTensor(order, dimSize);
+ XTensor * gold = NewTensorV2(order, dimSize);
- XTensor * loss = NewTensor1D(4);
+ XTensor * loss = NewTensor1DV2(4);
- XTensor * weight = NewTensor(wOrder, wDimSize);
+ XTensor * weight = NewTensorV2(wOrder, wDimSize);
  /* initialize variables */
  output->SetData(outputData, unitNum);
@@ -284,10 +284,10 @@ bool TestCrossEntropy3()
  bool gpuTest = true;
  /* create tensor */
- XTensor * outputGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * outputGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * goldGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * goldGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * lossGPU = NewTensor1D(4, X_FLOAT, 0);
+ XTensor * lossGPU = NewTensor1DV2(4, X_FLOAT, 0);
- XTensor * weightGPU = NewTensor(wOrder, wDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * weightGPU = NewTensorV2(wOrder, wDimSize, X_FLOAT, 1.0F, 0);
  /* Initialize variables */
  outputGPU->SetData(outputData, unitNum);
@@ -354,8 +354,8 @@ bool TestCrossEntropy4()
  DTYPE error;
  /* create tensors */
- XTensor * output = NewTensor(order, dimSize);
+ XTensor * output = NewTensorV2(order, dimSize);
- XTensor * gold = NewTensor(order, dimSize);
+ XTensor * gold = NewTensorV2(order, dimSize);
  /* initialize variables */
  output->SetZeroAll();
@@ -374,8 +374,8 @@ bool TestCrossEntropy4()
  bool gpuTest = true;
  /* create tensor */
- XTensor * outputGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * outputGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * goldGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * goldGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
  /* Initialize variables */
  outputGPU->SetZeroAll();
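The dimension-specific helpers follow the same rename; the TestCrossEntropy hunks above use the 1D form for their loss vectors. A sketch, assuming NewTensor1DV2 takes the vector length followed by the optional data type and device id seen in the diff:

    XTensor * loss    = NewTensor1DV2(4);              /* 4-element loss vector on the CPU */
    XTensor * lossGPU = NewTensor1DV2(4, X_FLOAT, 0);  /* the same vector on device 0 */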
source/tensor/test/TDiv.cpp
@@ -72,10 +72,10 @@ bool TestDiv1()
  bool cpuTest = true;
  /* create tensors */
- XTensor * s1 = NewTensor(sOrder1, sDimSize1);
+ XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
- XTensor * s2 = NewTensor(sOrder2, sDimSize2);
+ XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
- XTensor * t = NewTensor(tOrder, tDimSize);
+ XTensor * t = NewTensorV2(tOrder, tDimSize);
- XTensor * tMe = NewTensor(tOrder, tDimSize);
+ XTensor * tMe = NewTensorV2(tOrder, tDimSize);
  XTensor tUser;
  /* initialize variables */
@@ -99,10 +99,10 @@ bool TestDiv1()
  bool gpuTest = true;
  /* create tensor */
- XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU1 = NewTensorV2(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
- XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU2 = NewTensorV2(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
- XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
- XTensor * tMeGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tMeGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
  XTensor tUserGPU;
  /* Initialize variables */
source/tensor/test/TDivDim.cpp
@@ -63,10 +63,10 @@ bool TestDivDim1()
  bool cpuTest = true;
  /* create tensors */
- XTensor * a = NewTensor(aOrder, aDimSize);
+ XTensor * a = NewTensorV2(aOrder, aDimSize);
- XTensor * b = NewTensor(bOrder, bDimSize);
+ XTensor * b = NewTensorV2(bOrder, bDimSize);
- XTensor * c = NewTensor(aOrder, aDimSize);
+ XTensor * c = NewTensorV2(aOrder, aDimSize);
- XTensor * cMe = NewTensor(aOrder, aDimSize);
+ XTensor * cMe = NewTensorV2(aOrder, aDimSize);
  XTensor cUser;
  /* initialize variables */
@@ -90,10 +90,10 @@ bool TestDivDim1()
  bool gpuTest = true;
  /* create tensor */
- XTensor * aGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * aGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
- XTensor * bGPU = NewTensor(bOrder, bDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * bGPU = NewTensorV2(bOrder, bDimSize, X_FLOAT, 1.0F, 0);
- XTensor * cGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * cGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
- XTensor * cMeGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * cMeGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
  XTensor cUserGPU;
  /* Initialize variables */
@@ -177,10 +177,10 @@ bool TestDivDim2()
  bool cpuTest = true;
  /* create tensors */
- XTensor * a = NewTensor(aOrder, aDimSize);
+ XTensor * a = NewTensorV2(aOrder, aDimSize);
- XTensor * b = NewTensor(bOrder, bDimSize);
+ XTensor * b = NewTensorV2(bOrder, bDimSize);
- XTensor * c = NewTensor(aOrder, aDimSize);
+ XTensor * c = NewTensorV2(aOrder, aDimSize);
- XTensor * cMe = NewTensor(aOrder, aDimSize);
+ XTensor * cMe = NewTensorV2(aOrder, aDimSize);
  XTensor cUser;
  /* initialize variables */
@@ -204,10 +204,10 @@ bool TestDivDim2()
  bool gpuTest = true;
  /* create tensor */
- XTensor * aGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * aGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
- XTensor * bGPU = NewTensor(bOrder, bDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * bGPU = NewTensorV2(bOrder, bDimSize, X_FLOAT, 1.0F, 0);
- XTensor * cGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * cGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
- XTensor * cMeGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * cMeGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
  XTensor cUserGPU;
  /* Initialize variables */
source/tensor/test/TDropout.cpp
@@ -45,8 +45,8 @@ bool TestDropout1()
  bool cpuTest = true;
  /* create tensors */
- XTensor * x = NewTensor(order, dimSize);
+ XTensor * x = NewTensorV2(order, dimSize);
- XTensor * y = NewTensor(order, dimSize);
+ XTensor * y = NewTensorV2(order, dimSize);
  XTensor yUser;
  /* initialize variables */
@@ -83,8 +83,8 @@ bool TestDropout1()
  bool gpuTest = true;
  /* create tensors */
- XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * xGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * yGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
  XTensor yUserGPU;
  /* initialize variables */
@@ -151,10 +151,10 @@ bool TestDropout2()
  bool cpuTest = true;
  /* create tensors */
- XTensor * x = NewTensor(order, dimSize);
+ XTensor * x = NewTensorV2(order, dimSize);
- XTensor * y = NewTensor(order, dimSize);
+ XTensor * y = NewTensorV2(order, dimSize);
- XTensor * dedx = NewTensor(order, dimSize);
+ XTensor * dedx = NewTensorV2(order, dimSize);
- XTensor * dedy = NewTensor(order, dimSize);
+ XTensor * dedy = NewTensorV2(order, dimSize);
  /* initialize variables */
  _SetDataFixedFloat(x, 1.0F);
@@ -177,10 +177,10 @@ bool TestDropout2()
  bool gpuTest = true;
  /* create tensors */
- XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * xGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * yGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * dedxGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * dedyGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
  /* initialize variables */
  _SetDataFixedFloat(xGPU, 1.0F);
source/tensor/test/TExp.cpp
@@ -52,9 +52,9 @@ bool TestExp1()
  bool cpuTest = true;
  /* create tensors */
- XTensor * a = NewTensor(order, dimSize);
+ XTensor * a = NewTensorV2(order, dimSize);
- XTensor * b = NewTensor(order, dimSize);
+ XTensor * b = NewTensorV2(order, dimSize);
- XTensor * aMe = NewTensor(order, dimSize);
+ XTensor * aMe = NewTensorV2(order, dimSize);
  XTensor bUser;
  /* initialize variables */
@@ -76,9 +76,9 @@ bool TestExp1()
  bool gpuTest = true;
  /* create tensor */
- XTensor * aGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * aGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * bGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * bGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * aMeGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * aMeGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
  XTensor bUserGPU;
  /* Initialize variables */
source/tensor/test/TGather.cpp
@@ -75,9 +75,9 @@ bool TestGather1()
  bool cpuTest = true;
  /* create tensors */
- XTensor * s = NewTensor(sOrder, sDimSize);
+ XTensor * s = NewTensorV2(sOrder, sDimSize);
- XTensor * t = NewTensor(tOrder, tDimSize);
+ XTensor * t = NewTensorV2(tOrder, tDimSize);
- XTensor * index = NewTensor(indexOrder, indexDimSize, X_INT);
+ XTensor * index = NewTensorV2(indexOrder, indexDimSize, X_INT);
  XTensor tUser;
  /* initialize variables */
@@ -98,9 +98,9 @@ bool TestGather1()
  bool gpuTest = true;
  /* create tensors */
- XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
- XTensor * tGPU = NewTensor(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU = NewTensorV2(sOrder, tDimSize, X_FLOAT, 1.0F, 0);
- XTensor * indexGPU = NewTensor(indexOrder, indexDimSize, X_INT, 1.0F, 0);
+ XTensor * indexGPU = NewTensorV2(indexOrder, indexDimSize, X_INT, 1.0F, 0);
  XTensor tUserGPU;
  /* initialize variables */
source/tensor/test/THardTanH.cpp
@@ -52,8 +52,8 @@ bool TestHardTanH1()
  bool cpuTest = true;
  /* create tensors */
- XTensor * x = NewTensor(order, dimSize);
+ XTensor * x = NewTensorV2(order, dimSize);
- XTensor * y = NewTensor(order, dimSize);
+ XTensor * y = NewTensorV2(order, dimSize);
  XTensor yUser;
  /* initialize variables */
@@ -72,8 +72,8 @@ bool TestHardTanH1()
  bool gpuTest = true;
  /* create tensor */
- XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * xGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * yGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
  XTensor yUserGPU;
  /* Initialize variables */
@@ -141,10 +141,10 @@ bool TestHardTanH2()
  bool cpuTest = true;
  /* create tensors */
- XTensor * x = NewTensor(order, dimSize);
+ XTensor * x = NewTensorV2(order, dimSize);
- XTensor * y = NewTensor(order, dimSize);
+ XTensor * y = NewTensorV2(order, dimSize);
- XTensor * dedy = NewTensor(order, dimSize);
+ XTensor * dedy = NewTensorV2(order, dimSize);
- XTensor * dedx = NewTensor(order, dimSize);
+ XTensor * dedx = NewTensorV2(order, dimSize);
  /* initialize variables */
  x->SetData(xData, unitNum);
@@ -167,10 +167,10 @@ bool TestHardTanH2()
  bool gpuTest = true;
  /* create tensors */
- XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * xGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * yGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * dedyGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * dedxGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
  /* initialize variables */
  xGPU->SetData(xData, unitNum);
source/tensor/test/TIdentity.cpp
@@ -50,8 +50,8 @@ bool TestIdentity1()
  bool cpuTest = true;
  /* create tensors */
- XTensor * x = NewTensor(order, dimSize);
+ XTensor * x = NewTensorV2(order, dimSize);
- XTensor * y = NewTensor(order, dimSize);
+ XTensor * y = NewTensorV2(order, dimSize);
  XTensor yUser;
  /* initialize variables */
@@ -70,8 +70,8 @@ bool TestIdentity1()
  bool gpuTest = true;
  /* create tensors */
- XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * xGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * yGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
  XTensor yUserGPU;
  /* initialize variables */
@@ -129,10 +129,10 @@ bool TestIdentity2()
  bool cpuTest = true;
  /* create tensors */
- XTensor * x = NewTensor(order, dimSize);
+ XTensor * x = NewTensorV2(order, dimSize);
- XTensor * y = NewTensor(order, dimSize);
+ XTensor * y = NewTensorV2(order, dimSize);
- XTensor * dedy = NewTensor(order, dimSize);
+ XTensor * dedy = NewTensorV2(order, dimSize);
- XTensor * dedx = NewTensor(order, dimSize);
+ XTensor * dedx = NewTensorV2(order, dimSize);
  /* initialize variables */
  x->SetData(xData, unitNum);
@@ -155,10 +155,10 @@ bool TestIdentity2()
  bool gpuTest = true;
  /* create tensors */
- XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * xGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * yGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * dedyGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * dedxGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
  /* initialize variables */
  xGPU->SetData(xData, unitNum);
source/tensor/test/TLog.cpp
@@ -52,9 +52,9 @@ bool TestLog1()
  bool cpuTest = true;
  /* create tensors */
- XTensor * a = NewTensor(order, dimSize);
+ XTensor * a = NewTensorV2(order, dimSize);
- XTensor * b = NewTensor(order, dimSize);
+ XTensor * b = NewTensorV2(order, dimSize);
- XTensor * aMe = NewTensor(order, dimSize);
+ XTensor * aMe = NewTensorV2(order, dimSize);
  XTensor bUser;
  /* initialize variables */
@@ -74,9 +74,9 @@ bool TestLog1()
  bool gpuTest = true;
  /* create tensor */
- XTensor * aGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * aGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * bGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * bGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * aMeGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * aMeGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
  XTensor bUserGPU;
  /* Initialize variables */
source/tensor/test/TLogSoftmax.cpp
@@ -50,8 +50,8 @@ bool TestLogSoftmax1()
  bool cpuTest = true;
  /* create tensors */
- XTensor * x = NewTensor(order, dimSize);
+ XTensor * x = NewTensorV2(order, dimSize);
- XTensor * y = NewTensor(order, dimSize);
+ XTensor * y = NewTensorV2(order, dimSize);
  XTensor yUser;
  /* initialize variables */
@@ -70,8 +70,8 @@ bool TestLogSoftmax1()
  bool gpuTest = true;
  /* create tensors */
- XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * xGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * yGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
  XTensor yUserGPU;
  /* initialize variables */
@@ -130,11 +130,11 @@ bool TestLogSoftmax2()
  bool cpuTest = true;
  /* create tensors */
- XTensor * x = NewTensor(order, dimSize);
+ XTensor * x = NewTensorV2(order, dimSize);
- XTensor * y = NewTensor(order, dimSize);
+ XTensor * y = NewTensorV2(order, dimSize);
- XTensor * g = NewTensor(order, dimSize);
+ XTensor * g = NewTensorV2(order, dimSize);
- XTensor * dedy = NewTensor(order, dimSize);
+ XTensor * dedy = NewTensorV2(order, dimSize);
- XTensor * dedx = NewTensor(order, dimSize);
+ XTensor * dedx = NewTensorV2(order, dimSize);
  /* initialize variables */
  x->SetData(xData, unitNum);
@@ -158,11 +158,11 @@ bool TestLogSoftmax2()
  bool gpuTest = true;
  /* create tensors */
- XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * xGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * yGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * gGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * gGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * dedyGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * dedxGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
  /* initialize variables */
  xGPU->SetData(xData, unitNum);
@@ -234,11 +234,11 @@ bool TestLogSoftmax3()
  bool cpuTest = true;
  /* create tensors */
- XTensor * x = NewTensor(order, dimSize);
+ XTensor * x = NewTensorV2(order, dimSize);
- XTensor * y = NewTensor(order, dimSize);
+ XTensor * y = NewTensorV2(order, dimSize);
- XTensor * g = NewTensor(order, dimSize);
+ XTensor * g = NewTensorV2(order, dimSize);
- XTensor * dedy = NewTensor(order, dimSize);
+ XTensor * dedy = NewTensorV2(order, dimSize);
- XTensor * dedx = NewTensor(order, dimSize);
+ XTensor * dedx = NewTensorV2(order, dimSize);
  /* initialize variables */
  x->SetData(xData, unitNum);
@@ -262,11 +262,11 @@ bool TestLogSoftmax3()
  bool gpuTest = true;
  /* create tensors */
- XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * xGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * yGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * gGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * gGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * dedyGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * dedxGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
  /* initialize variables */
  xGPU->SetData(xData, unitNum);
source/tensor/test/TLoss.cpp
@@ -50,8 +50,8 @@ bool TestLoss1()
  DTYPE error;
  /* create tensors */
- XTensor * output = NewTensor(order, dimSize);
+ XTensor * output = NewTensorV2(order, dimSize);
- XTensor * gold = NewTensor(order, dimSize);
+ XTensor * gold = NewTensorV2(order, dimSize);
  /* initialize variables */
  output->SetZeroAll();
@@ -70,8 +70,8 @@ bool TestLoss1()
  bool gpuTest = true;
  /* create tensor */
- XTensor * outputGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * outputGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * goldGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * goldGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
  /* Initialize variables */
  outputGPU->SetZeroAll();
@@ -128,8 +128,8 @@ bool TestLoss2()
  DTYPE error;
  /* create tensors */
- XTensor * output = NewTensor(order, dimSize);
+ XTensor * output = NewTensorV2(order, dimSize);
- XTensor * gold = NewTensor(order, dimSize);
+ XTensor * gold = NewTensorV2(order, dimSize);
  /* initialize variables */
  output->SetZeroAll();
@@ -148,8 +148,8 @@ bool TestLoss2()
  bool gpuTest = true;
  /* create tensor */
- XTensor * outputGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * outputGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * goldGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * goldGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
  /* Initialize variables */
  outputGPU->SetZeroAll();
@@ -216,8 +216,8 @@ bool TestLoss3()
  DTYPE error;
  /* create tensors */
- XTensor * output = NewTensor(order, dimSize);
+ XTensor * output = NewTensorV2(order, dimSize);
- XTensor * gold = NewTensor(order, dimSize);
+ XTensor * gold = NewTensorV2(order, dimSize);
  /* initialize variables */
  output->SetData(outputData, unitNum);
@@ -234,8 +234,8 @@ bool TestLoss3()
  bool gpuTest = true;
  /* create tensor */
- XTensor * outputGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * outputGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
- XTensor * goldGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+ XTensor * goldGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
  /* Initialize variables */
  outputGPU->SetData(outputData, unitNum);
source/tensor/test/TMatrixMul.cpp
@@ -73,9 +73,9 @@ bool TestMatrixMul1()
  bool cpuTest = true;
  /* create tensors */
- XTensor * s1 = NewTensor(sOrder1, sDimSize1);
+ XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
- XTensor * s2 = NewTensor(sOrder2, sDimSize2);
+ XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
- XTensor * t = NewTensor(tOrder, tDimSize);
+ XTensor * t = NewTensorV2(tOrder, tDimSize);
  XTensor tUser;
  /* initialize variables */
@@ -95,9 +95,9 @@ bool TestMatrixMul1()
  bool gpuTest = true;
  /* create tensor */
- XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU1 = NewTensorV2(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
- XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU2 = NewTensorV2(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
- XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
  XTensor tUserGPU;
  /* Initialize variables */
@@ -187,9 +187,9 @@ bool TestMatrixMul2()
  bool cpuTest = true;
  /* create tensors */
- XTensor * s1 = NewTensor(sOrder1, sDimSize1);
+ XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
- XTensor * s2 = NewTensor(sOrder2, sDimSize2);
+ XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
- XTensor * t = NewTensor(tOrder, tDimSize);
+ XTensor * t = NewTensorV2(tOrder, tDimSize);
  XTensor tUser;
  /* initialize variables */
@@ -209,9 +209,9 @@ bool TestMatrixMul2()
  bool gpuTest = true;
  /* create tensor */
- XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU1 = NewTensorV2(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
- XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU2 = NewTensorV2(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
- XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
  XTensor tUserGPU;
  /* Initialize variables */
@@ -321,9 +321,9 @@ bool TestMatrixMul3()
  bool cpuTest = true;
  /* create tensors */
- XTensor * s1 = NewTensor(sOrder1, sDimSize1);
+ XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
- XTensor * s2 = NewTensor(sOrder2, sDimSize2);
+ XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
- XTensor * t = NewTensor(tOrder, tDimSize);
+ XTensor * t = NewTensorV2(tOrder, tDimSize);
  XTensor tUser;
  /* initialize variables */
@@ -343,9 +343,9 @@ bool TestMatrixMul3()
  bool gpuTest = true;
  /* create tensor */
- XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU1 = NewTensorV2(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
- XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU2 = NewTensorV2(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
- XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
  XTensor tUserGPU;
  /* Initialize variables */
@@ -444,9 +444,9 @@ bool TestMatrixMul4()
  bool cpuTest = true;
  /* create tensors */
- XTensor * s1 = NewTensor(sOrder1, sDimSize1);
+ XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
- XTensor * s2 = NewTensor(sOrder2, sDimSize2);
+ XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
- XTensor * t = NewTensor(tOrder, tDimSize);
+ XTensor * t = NewTensorV2(tOrder, tDimSize);
  XTensor tUser;
  /* initialize variables */
@@ -466,9 +466,9 @@ bool TestMatrixMul4()
  bool gpuTest = true;
  /* create tensor */
- XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU1 = NewTensorV2(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
- XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
+ XTensor * sGPU2 = NewTensorV2(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
- XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+ XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
  XTensor tUserGPU;
  /* Initialize variables */
source/tensor/test/TMatrixMul2D.cpp  (view file @ 823abb4f)
...
@@ -73,9 +73,9 @@ bool TestMatrixMul2D1()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s1 = NewTensor(sOrder1, sDimSize1);
+    XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
-    XTensor * s2 = NewTensor(sOrder2, sDimSize2);
+    XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
     /* initialize variables */
     s1->SetData(sData1, sUnitNum1);
     ...
@@ -93,9 +93,9 @@ bool TestMatrixMul2D1()
     bool gpuTest = true;
     /* create tensor */
-    XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU1 = NewTensorV2(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
-    XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU2 = NewTensorV2(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
     /* Initialize variables */
     sGPU1->SetData(sData1, sUnitNum1);
     ...
@@ -183,9 +183,9 @@ bool TestMatrixMul2D2()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s1 = NewTensor(sOrder1, sDimSize1);
+    XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
-    XTensor * s2 = NewTensor(sOrder2, sDimSize2);
+    XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
     /* initialize variables */
     s1->SetData(sData1, sUnitNum1);
     ...
@@ -203,9 +203,9 @@ bool TestMatrixMul2D2()
     bool gpuTest = true;
     /* create tensor */
-    XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU1 = NewTensorV2(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
-    XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU2 = NewTensorV2(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
     /* Initialize variables */
     sGPU1->SetData(sData1, sUnitNum1);
     ...
source/tensor/test/TMatrixMul2DParallel.cpp  (view file @ 823abb4f)
...
@@ -73,9 +73,9 @@ bool TestMatrixMul2DParallel1()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s1 = NewTensor(sOrder1, sDimSize1);
+    XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
-    XTensor * s2 = NewTensor(sOrder2, sDimSize2);
+    XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
     /* initialize variables */
     s1->SetData(sData1, sUnitNum1);
     ...
@@ -149,9 +149,9 @@ bool TestMatrixMul2DParallel2()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s1 = NewTensor(sOrder1, sDimSize1);
+    XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
-    XTensor * s2 = NewTensor(sOrder2, sDimSize2);
+    XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
     /* initialize variables */
     s1->SetData(sData1, sUnitNum1);
     ...
source/tensor/test/TMatrixMulBatched.cpp  (view file @ 823abb4f)
...
@@ -73,9 +73,9 @@ bool TestMatrixMulBatched1()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s1 = NewTensor(sOrder1, sDimSize1);
+    XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
-    XTensor * s2 = NewTensor(sOrder2, sDimSize2);
+    XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
     XTensor tUser;
     /* initialize variables */
     ...
@@ -95,9 +95,9 @@ bool TestMatrixMulBatched1()
     bool gpuTest = true;
     /* create tensor */
-    XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU1 = NewTensorV2(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
-    XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU2 = NewTensorV2(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU;
     /* Initialize variables */
     ...
@@ -195,9 +195,9 @@ bool TestMatrixMulBatched2()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s1 = NewTensor(sOrder1, sDimSize1);
+    XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
-    XTensor * s2 = NewTensor(sOrder2, sDimSize2);
+    XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
     XTensor tUser;
     /* initialize variables */
     ...
@@ -217,9 +217,9 @@ bool TestMatrixMulBatched2()
     bool gpuTest = true;
     /* create tensor */
-    XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU1 = NewTensorV2(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
-    XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU2 = NewTensorV2(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU;
     /* Initialize variables */
     ...
source/tensor/test/TMerge.cpp  (view file @ 823abb4f)
...
@@ -59,8 +59,8 @@ bool TestMerge1()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * s = NewTensorV2(sOrder, sDimSize);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
     XTensor tUser;
     /* initialize variables */
     ...
@@ -79,8 +79,8 @@ bool TestMerge1()
     bool gpuTest = true;
     /* create tensor */
-    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU;
     /* Initialize variables */
     ...
@@ -168,9 +168,9 @@ bool TestMerge2()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * s = NewTensorV2(sOrder, sDimSize);
-    XTensor * t1 = NewTensor(tOrder1, tDimSize1);
+    XTensor * t1 = NewTensorV2(tOrder1, tDimSize1);
-    XTensor * t2 = NewTensor(tOrder2, tDimSize2);
+    XTensor * t2 = NewTensorV2(tOrder2, tDimSize2);
     XTensor tUser1;
     XTensor tUser2;
     ...
@@ -194,9 +194,9 @@ bool TestMerge2()
     bool gpuTest = true;
     /* create tensor */
-    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU1 = NewTensor(tOrder1, tDimSize1, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU1 = NewTensorV2(tOrder1, tDimSize1, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU2 = NewTensor(tOrder2, tDimSize2, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU2 = NewTensorV2(tOrder2, tDimSize2, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU1;
     XTensor tUserGPU2;
     ...
@@ -283,9 +283,9 @@ bool TestMerge3()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s1 = NewTensor(sOrder, sDimSize);
+    XTensor * s1 = NewTensorV2(sOrder, sDimSize);
-    XTensor * s2 = NewTensor(sOrder, sDimSize);
+    XTensor * s2 = NewTensorV2(sOrder, sDimSize);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
     XTensor tUser;
     /* initialize variables */
     ...
@@ -312,9 +312,9 @@ bool TestMerge3()
     smallList->Clear();
     /* create tensors */
-    XTensor * sGPU1 = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU1 = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * sGPU2 = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU2 = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize);
     XTensor tUserGPU;
     /* initialize variables */
     ...
@@ -399,9 +399,9 @@ bool TestMerge4()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s1 = NewTensor(sOrder, sDimSize);
+    XTensor * s1 = NewTensorV2(sOrder, sDimSize);
-    XTensor * s2 = NewTensor(sOrder, sDimSize);
+    XTensor * s2 = NewTensorV2(sOrder, sDimSize);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
     XTensor tUser;
     /* initialize variables */
     ...
@@ -428,9 +428,9 @@ bool TestMerge4()
     smallList->Clear();
     /* create tensors */
-    XTensor * sGPU1 = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU1 = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * sGPU2 = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU2 = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize);
     XTensor tUserGPU;
     /* initialize variables */
     ...
source/tensor/test/TMultiply.cpp  (view file @ 823abb4f)
...
@@ -72,10 +72,10 @@ bool TestMultiply1()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s1 = NewTensor(sOrder1, sDimSize1);
+    XTensor * s1 = NewTensorV2(sOrder1, sDimSize1);
-    XTensor * s2 = NewTensor(sOrder2, sDimSize2);
+    XTensor * s2 = NewTensorV2(sOrder2, sDimSize2);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
-    XTensor * tMe = NewTensor(tOrder, tDimSize);
+    XTensor * tMe = NewTensorV2(tOrder, tDimSize);
     XTensor tUser;
     /* initialize variables */
     ...
@@ -99,10 +99,10 @@ bool TestMultiply1()
     bool gpuTest = true;
     /* create tensor */
-    XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU1 = NewTensorV2(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
-    XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU2 = NewTensorV2(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tMeGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tMeGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU;
     /* Initialize variables */
     ...
source/tensor/test/TMultiplyDim.cpp  (view file @ 823abb4f)
...
@@ -62,10 +62,10 @@ bool TestMultiplyDim1()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * a = NewTensor(aOrder, aDimSize);
+    XTensor * a = NewTensorV2(aOrder, aDimSize);
-    XTensor * b = NewTensor(bOrder, bDimSize);
+    XTensor * b = NewTensorV2(bOrder, bDimSize);
-    XTensor * c = NewTensor(aOrder, aDimSize);
+    XTensor * c = NewTensorV2(aOrder, aDimSize);
-    XTensor * cMe = NewTensor(aOrder, aDimSize);
+    XTensor * cMe = NewTensorV2(aOrder, aDimSize);
     XTensor cUser;
     /* initialize variables */
     ...
@@ -89,10 +89,10 @@ bool TestMultiplyDim1()
     bool gpuTest = true;
     /* create tensor */
-    XTensor * aGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * bGPU = NewTensor(bOrder, bDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * bGPU = NewTensorV2(bOrder, bDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * cGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * cGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * cMeGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * cMeGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
     XTensor cUserGPU;
     /* Initialize variables */
     ...
@@ -174,10 +174,10 @@ bool TestMultiplyDim2()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * a = NewTensor(aOrder, aDimSize);
+    XTensor * a = NewTensorV2(aOrder, aDimSize);
-    XTensor * b = NewTensor(bOrder, bDimSize);
+    XTensor * b = NewTensorV2(bOrder, bDimSize);
-    XTensor * c = NewTensor(aOrder, aDimSize);
+    XTensor * c = NewTensorV2(aOrder, aDimSize);
-    XTensor * cMe = NewTensor(aOrder, aDimSize);
+    XTensor * cMe = NewTensorV2(aOrder, aDimSize);
     XTensor cUser;
     /* initialize variables */
     ...
@@ -201,10 +201,10 @@ bool TestMultiplyDim2()
     bool gpuTest = true;
     /* create tensor */
-    XTensor * aGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * bGPU = NewTensor(bOrder, bDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * bGPU = NewTensorV2(bOrder, bDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * cGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * cGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * cMeGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * cMeGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
     XTensor cUserGPU;
     /* Initialize variables */
     ...
source/tensor/test/TNegate.cpp  (view file @ 823abb4f)
...
@@ -48,9 +48,9 @@ bool TestNegate1()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * a = NewTensor(aOrder, aDimSize);
+    XTensor * a = NewTensorV2(aOrder, aDimSize);
-    XTensor * b = NewTensor(aOrder, aDimSize);
+    XTensor * b = NewTensorV2(aOrder, aDimSize);
-    XTensor * aMe = NewTensor(aOrder, aDimSize);
+    XTensor * aMe = NewTensorV2(aOrder, aDimSize);
     XTensor bUser;
     /* initialize variables */
     ...
@@ -70,9 +70,9 @@ bool TestNegate1()
     bool gpuTest = true;
     /* create tensor */
-    XTensor * aGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * bGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * bGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * aMeGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aMeGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
     XTensor bUserGPU;
     /* Initialize variables */
     ...
@@ -132,9 +132,9 @@ bool TestNegate2()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * a = NewTensor(aOrder, aDimSize);
+    XTensor * a = NewTensorV2(aOrder, aDimSize);
-    XTensor * b = NewTensor(aOrder, aDimSize);
+    XTensor * b = NewTensorV2(aOrder, aDimSize);
-    XTensor * aMe = NewTensor(aOrder, aDimSize);
+    XTensor * aMe = NewTensorV2(aOrder, aDimSize);
     XTensor bUser;
     /* initialize variables */
     ...
@@ -154,9 +154,9 @@ bool TestNegate2()
     bool gpuTest = true;
     /* create tensor */
-    XTensor * aGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * bGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * bGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * aMeGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aMeGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
     XTensor bUserGPU;
     /* Initialize variables */
     ...
source/tensor/test/TNormalize.cpp  (view file @ 823abb4f)
...
@@ -103,13 +103,13 @@ bool TestNormalize1()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * s = NewTensorV2(sOrder, sDimSize);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
-    XTensor * mean = NewTensor(meanOrder, meanDimSize);
+    XTensor * mean = NewTensorV2(meanOrder, meanDimSize);
-    XTensor * var = NewTensor(varOrder, varDimSize);
+    XTensor * var = NewTensorV2(varOrder, varDimSize);
-    XTensor * a = NewTensor(aOrder, aDimSize);
+    XTensor * a = NewTensorV2(aOrder, aDimSize);
-    XTensor * b = NewTensor(bOrder, bDimSize);
+    XTensor * b = NewTensorV2(bOrder, bDimSize);
-    XTensor * tMe = NewTensor(sOrder, sDimSize);
+    XTensor * tMe = NewTensorV2(sOrder, sDimSize);
     XTensor tUser;
     /* initialize variables */
     ...
@@ -135,13 +135,13 @@ bool TestNormalize1()
     bool gpuTest = true;
     /* create tensors */
-    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * meanGPU = NewTensor(meanOrder, meanDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * meanGPU = NewTensorV2(meanOrder, meanDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * varGPU = NewTensor(varOrder, varDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * varGPU = NewTensorV2(varOrder, varDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * aGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * bGPU = NewTensor(bOrder, bDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * bGPU = NewTensorV2(bOrder, bDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tMeGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tMeGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU;
     /* initialize variables */
     ...
source/tensor/test/TPower.cpp  (view file @ 823abb4f)
...
@@ -53,9 +53,9 @@ bool TestPower1()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * a = NewTensor(aOrder, aDimSize);
+    XTensor * a = NewTensorV2(aOrder, aDimSize);
-    XTensor * b = NewTensor(aOrder, aDimSize);
+    XTensor * b = NewTensorV2(aOrder, aDimSize);
-    XTensor * aMe = NewTensor(aOrder, aDimSize);
+    XTensor * aMe = NewTensorV2(aOrder, aDimSize);
     XTensor bUser;
     /* initialize variables */
     ...
@@ -77,9 +77,9 @@ bool TestPower1()
     bool gpuTest = true;
     /* create tensor */
-    XTensor * aGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * bGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * bGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * aMeGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aMeGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
     XTensor bUserGPU;
     /* Initialize variables */
     ...
@@ -144,9 +144,9 @@ bool TestPower2()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * a = NewTensor(aOrder, aDimSize);
+    XTensor * a = NewTensorV2(aOrder, aDimSize);
-    XTensor * b = NewTensor(aOrder, aDimSize);
+    XTensor * b = NewTensorV2(aOrder, aDimSize);
-    XTensor * aMe = NewTensor(aOrder, aDimSize);
+    XTensor * aMe = NewTensorV2(aOrder, aDimSize);
     XTensor bUser;
     /* initialize variables */
     ...
@@ -168,9 +168,9 @@ bool TestPower2()
     bool gpuTest = true;
     /* create tensor */
-    XTensor * aGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * bGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * bGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * aMeGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aMeGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
     XTensor bUserGPU;
     /* Initialize variables */
     ...
@@ -235,9 +235,9 @@ bool TestPower3()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * a = NewTensor(aOrder, aDimSize);
+    XTensor * a = NewTensorV2(aOrder, aDimSize);
-    XTensor * b = NewTensor(aOrder, aDimSize);
+    XTensor * b = NewTensorV2(aOrder, aDimSize);
-    XTensor * aMe = NewTensor(aOrder, aDimSize);
+    XTensor * aMe = NewTensorV2(aOrder, aDimSize);
     XTensor bUser;
     /* initialize variables */
     ...
@@ -259,9 +259,9 @@ bool TestPower3()
     bool gpuTest = true;
     /* create tensor */
-    XTensor * aGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * bGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * bGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * aMeGPU = NewTensor(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aMeGPU = NewTensorV2(aOrder, aDimSize, X_FLOAT, 1.0F, 0);
     XTensor bUserGPU;
     /* Initialize variables */
     ...
source/tensor/test/TRectify.cpp  (view file @ 823abb4f)
...
@@ -49,8 +49,8 @@ bool TestRectify1()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * x = NewTensor(order, dimSize);
+    XTensor * x = NewTensorV2(order, dimSize);
-    XTensor * y = NewTensor(order, dimSize);
+    XTensor * y = NewTensorV2(order, dimSize);
     XTensor yUser;
     /* initialize variables */
     ...
@@ -69,8 +69,8 @@ bool TestRectify1()
     bool gpuTest = true;
     /* create tensor */
-    XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * xGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
-    XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * yGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
     XTensor yUserGPU;
     /* Initialize variables */
     ...
@@ -133,10 +133,10 @@ bool TestRectify2()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * x = NewTensor(order, dimSize);
+    XTensor * x = NewTensorV2(order, dimSize);
-    XTensor * y = NewTensor(order, dimSize);
+    XTensor * y = NewTensorV2(order, dimSize);
-    XTensor * dedy = NewTensor(order, dimSize);
+    XTensor * dedy = NewTensorV2(order, dimSize);
-    XTensor * dedx = NewTensor(order, dimSize);
+    XTensor * dedx = NewTensorV2(order, dimSize);
     /* initialize variables */
     x->SetData(xData, unitNum);
     ...
@@ -157,10 +157,10 @@ bool TestRectify2()
     bool gpuTest = true;
     /* create tensors */
-    XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * xGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
-    XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * yGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
-    XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * dedyGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
-    XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * dedxGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
     /* initialize variables */
     xGPU->SetData(xData, unitNum);
     ...
source/tensor/test/TReduceMax.cpp  (view file @ 823abb4f)
...
@@ -69,9 +69,9 @@ bool TestReduceMax1()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * s = NewTensorV2(sOrder, sDimSize);
-    XTensor * t1 = NewTensor(tOrder1, tDimSize1);
+    XTensor * t1 = NewTensorV2(tOrder1, tDimSize1);
-    XTensor * t2 = NewTensor(tOrder2, tDimSize2);
+    XTensor * t2 = NewTensorV2(tOrder2, tDimSize2);
     XTensor tUser1;
     XTensor tUser2;
     ...
@@ -95,9 +95,9 @@ bool TestReduceMax1()
     bool gpuTest = true;
     /* create tensors */
-    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU1 = NewTensor(tOrder1, tDimSize1, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU1 = NewTensorV2(tOrder1, tDimSize1, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU2 = NewTensor(tOrder2, tDimSize2, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU2 = NewTensorV2(tOrder2, tDimSize2, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU1;
     XTensor tUserGPU2;
     ...
source/tensor/test/TReduceMean.cpp  (view file @ 823abb4f)
...
@@ -64,9 +64,9 @@ bool TestReduceMean1()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * s = NewTensorV2(sOrder, sDimSize);
-    XTensor * t1 = NewTensor(tOrder1, tDimSize1);
+    XTensor * t1 = NewTensorV2(tOrder1, tDimSize1);
-    XTensor * t2 = NewTensor(tOrder2, tDimSize2);
+    XTensor * t2 = NewTensorV2(tOrder2, tDimSize2);
     XTensor tUser1;
     XTensor tUser2;
     ...
@@ -90,9 +90,9 @@ bool TestReduceMean1()
     bool gpuTest = true;
     /* create tensor */
-    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU1 = NewTensor(tOrder1, tDimSize1, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU1 = NewTensorV2(tOrder1, tDimSize1, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU2 = NewTensor(tOrder2, tDimSize2, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU2 = NewTensorV2(tOrder2, tDimSize2, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU1;
     XTensor tUserGPU2;
     ...
source/tensor/test/TReduceSum.cpp  (view file @ 823abb4f)
...
@@ -71,11 +71,11 @@ bool TestReduceSum1()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * s = NewTensorV2(sOrder, sDimSize);
-    XTensor * shift1 = NewTensor(tOrder1, tDimSize1);
+    XTensor * shift1 = NewTensorV2(tOrder1, tDimSize1);
-    XTensor * shift2 = NewTensor(tOrder2, tDimSize2);
+    XTensor * shift2 = NewTensorV2(tOrder2, tDimSize2);
-    XTensor * t1 = NewTensor(tOrder1, tDimSize1);
+    XTensor * t1 = NewTensorV2(tOrder1, tDimSize1);
-    XTensor * t2 = NewTensor(tOrder2, tDimSize2);
+    XTensor * t2 = NewTensorV2(tOrder2, tDimSize2);
     XTensor tUser1;
     XTensor tUser2;
     ...
@@ -101,11 +101,11 @@ bool TestReduceSum1()
     bool gpuTest = true;
     /* create tensors */
-    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * shiftGPU1 = NewTensor(tOrder1, tDimSize1, X_FLOAT, 1.0F, 0);
+    XTensor * shiftGPU1 = NewTensorV2(tOrder1, tDimSize1, X_FLOAT, 1.0F, 0);
-    XTensor * shiftGPU2 = NewTensor(tOrder2, tDimSize2, X_FLOAT, 1.0F, 0);
+    XTensor * shiftGPU2 = NewTensorV2(tOrder2, tDimSize2, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU1 = NewTensor(tOrder1, tDimSize1, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU1 = NewTensorV2(tOrder1, tDimSize1, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU2 = NewTensor(tOrder2, tDimSize2, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU2 = NewTensorV2(tOrder2, tDimSize2, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU1;
     XTensor tUserGPU2;
     ...
@@ -189,9 +189,9 @@ bool TestReduceSum2()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * s = NewTensorV2(sOrder, sDimSize);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
-    XTensor * answer = NewTensor(tOrder, tDimSize);
+    XTensor * answer = NewTensorV2(tOrder, tDimSize);
     XTensor tUser;
     /* initialize variables */
     ...
@@ -210,8 +210,8 @@ bool TestReduceSum2()
     bool gpuTest = true;
     /* create tensors */
-    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU;
     /* initialize variables */
     ...
@@ -278,9 +278,9 @@ bool TestReduceSum3()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * s = NewTensorV2(sOrder, sDimSize);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
-    XTensor * answer = NewTensor(tOrder, tDimSize);
+    XTensor * answer = NewTensorV2(tOrder, tDimSize);
     XTensor tUser;
     /* initialize variables */
     ...
@@ -299,8 +299,8 @@ bool TestReduceSum3()
     bool gpuTest = true;
     /* create tensors */
-    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU;
     /* initialize variables */
     ...
@@ -367,9 +367,9 @@ bool TestReduceSum4()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * s = NewTensorV2(sOrder, sDimSize);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
-    XTensor * answer = NewTensor(tOrder, tDimSize);
+    XTensor * answer = NewTensorV2(tOrder, tDimSize);
     XTensor tUser;
     /* initialize variables */
     ...
@@ -388,8 +388,8 @@ bool TestReduceSum4()
     bool gpuTest = true;
     /* create tensors */
-    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU;
     /* initialize variables */
     ...
@@ -458,9 +458,9 @@ bool TestReduceSum5()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * s = NewTensorV2(sOrder, sDimSize);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
-    XTensor * answer = NewTensor(tOrder, tDimSize);
+    XTensor * answer = NewTensorV2(tOrder, tDimSize);
     XTensor tUser;
     /* initialize variables */
     ...
@@ -479,8 +479,8 @@ bool TestReduceSum5()
     bool gpuTest = true;
     /* create tensors */
-    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU;
     /* initialize variables */
     ...
@@ -550,9 +550,9 @@ bool TestReduceSum6()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * s = NewTensorV2(sOrder, sDimSize);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
-    XTensor * answer = NewTensor(tOrder, tDimSize);
+    XTensor * answer = NewTensorV2(tOrder, tDimSize);
     XTensor tUser;
     /* initialize variables */
     ...
@@ -571,8 +571,8 @@ bool TestReduceSum6()
     bool gpuTest = true;
     /* create tensors */
-    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU;
     /* initialize variables */
     ...
source/tensor/test/TReduceSumAll.cpp  (view file @ 823abb4f)
...
@@ -49,7 +49,7 @@ bool TestReduceSumAll1()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * s = NewTensorV2(sOrder, sDimSize);
     /* initialize variables */
     s->SetData(sData, sUnitNum);
     ...
@@ -65,7 +65,7 @@ bool TestReduceSumAll1()
     bool gpuTest = true;
     /* create tensors */
-    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
     /* initialize variables */
     sGPU->SetData(sData, sUnitNum);
     ...
source/tensor/test/TReduceSumSquared.cpp  (view file @ 823abb4f)
...
@@ -68,9 +68,9 @@ bool TestReduceSumSquared1()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * s = NewTensorV2(sOrder, sDimSize);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
-    XTensor * shift = NewTensor(shiftOrder, shiftDimSize);
+    XTensor * shift = NewTensorV2(shiftOrder, shiftDimSize);
     XTensor tUser;
     /* initialize variables */
     ...
@@ -90,9 +90,9 @@ bool TestReduceSumSquared1()
     bool gpuTest = true;
     /* create tensors */
-    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * shiftGPU = NewTensor(shiftOrder, shiftDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * shiftGPU = NewTensorV2(shiftOrder, shiftDimSize, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU;
     /* initialize variables */
     ...
@@ -176,9 +176,9 @@ bool TestReduceSumSquared2()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * s = NewTensorV2(sOrder, sDimSize);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
-    XTensor * shift = NewTensor(shiftOrder, shiftDimSize);
+    XTensor * shift = NewTensorV2(shiftOrder, shiftDimSize);
     XTensor tUser;
     /* initialize variables */
     ...
@@ -198,9 +198,9 @@ bool TestReduceSumSquared2()
     bool gpuTest = true;
     /* create tensors */
-    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * shiftGPU = NewTensor(shiftOrder, shiftDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * shiftGPU = NewTensorV2(shiftOrder, shiftDimSize, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU;
     /* initialize variables */
     ...
source/tensor/test/TReduceVariance.cpp  (view file @ 823abb4f)
...
@@ -68,9 +68,9 @@ bool TestReduceVariance1()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * s = NewTensorV2(sOrder, sDimSize);
-    XTensor * t = NewTensor(tOrder, tDimSize);
+    XTensor * t = NewTensorV2(tOrder, tDimSize);
-    XTensor * mean = NewTensor(meanOrder, meanDimSize);
+    XTensor * mean = NewTensorV2(meanOrder, meanDimSize);
     XTensor tUser;
     /* initialize variables */
     ...
@@ -90,9 +90,9 @@ bool TestReduceVariance1()
     bool gpuTest = true;
     /* create tensors */
-    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * meanGPU = NewTensor(meanOrder, meanDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * meanGPU = NewTensorV2(meanOrder, meanDimSize, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU;
     /* initialize variables */
     ...
source/tensor/test/TRound.cpp  (view file @ 823abb4f)
...
@@ -54,9 +54,9 @@ bool TestRound1()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * a = NewTensor(order, dimSize);
+    XTensor * a = NewTensorV2(order, dimSize);
-    XTensor * b = NewTensor(order, dimSize);
+    XTensor * b = NewTensorV2(order, dimSize);
-    XTensor * aMe = NewTensor(order, dimSize);
+    XTensor * aMe = NewTensorV2(order, dimSize);
     XTensor bUser;
     /* initialize variables */
     ...
@@ -78,9 +78,9 @@ bool TestRound1()
     bool gpuTest = true;
     /* create tensor */
-    XTensor * aGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
-    XTensor * bGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * bGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
-    XTensor * aMeGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
+    XTensor * aMeGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
     XTensor bUserGPU;
     /* Initialize variables */
     ...
source/tensor/test/TScaleAndShift.cpp  (view file @ 823abb4f)
...
@@ -52,9 +52,9 @@ bool TestScaleAndShift1()
     bool cpuTest = true;
     /* create tensors */
-    XTensor * s = NewTensor(sOrder, sDimSize);
+    XTensor * s = NewTensorV2(sOrder, sDimSize);
-    XTensor * t = NewTensor(sOrder, sDimSize);
+    XTensor * t = NewTensorV2(sOrder, sDimSize);
-    XTensor * tMe = NewTensor(sOrder, sDimSize);
+    XTensor * tMe = NewTensorV2(sOrder, sDimSize);
     XTensor tUser;
     /* initialize variables */
     ...
@@ -75,9 +75,9 @@ bool TestScaleAndShift1()
     bool gpuTest = true;
     /* create tensors */
-    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * sGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
-    XTensor * tMeGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
+    XTensor * tMeGPU = NewTensorV2(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
     XTensor tUserGPU;
     /* initialize variables */
     ...
source/tensor/test/TSelect.cpp  (view file @ 823abb4f, diff collapsed)
source/tensor/test/TSetAscendingOrder.cpp  (view file @ 823abb4f, diff collapsed)
source/tensor/test/TSetData.cpp  (view file @ 823abb4f, diff collapsed)
source/tensor/test/TSigmoid.cpp  (view file @ 823abb4f, diff collapsed)
source/tensor/test/TSign.cpp  (view file @ 823abb4f, diff collapsed)
source/tensor/test/TSin.cpp  (view file @ 823abb4f, diff collapsed)
source/tensor/test/TSoftmax.cpp  (view file @ 823abb4f, diff collapsed)
source/tensor/test/TSort.cpp  (view file @ 823abb4f, diff collapsed)
source/tensor/test/TSplit.cpp  (view file @ 823abb4f, diff collapsed)
source/tensor/test/TSpread.cpp  (view file @ 823abb4f, diff collapsed)
source/tensor/test/TSub.cpp  (view file @ 823abb4f, diff collapsed)
source/tensor/test/TSubDim.cpp  (view file @ 823abb4f, diff collapsed)
source/tensor/test/TSum.cpp  (view file @ 823abb4f, diff collapsed)
source/tensor/test/TSumDim.cpp  (view file @ 823abb4f, diff collapsed)
source/tensor/test/TTan.cpp  (view file @ 823abb4f, diff collapsed)
source/tensor/test/TTopK.cpp  (view file @ 823abb4f, diff collapsed)
source/tensor/test/TTranspose.cpp  (view file @ 823abb4f, diff collapsed)
source/tensor/test/TUnsqueeze.cpp  (view file @ 823abb4f, diff collapsed)
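Note: the diffs in this commit apply one mechanical change to the test files: every call to the old NewTensor constructor is renamed to NewTensorV2 with the same arguments, in both the CPU form (order and dimension sizes) and the GPU form (order, dimension sizes, data type, dense ratio, device id). The following is a minimal sketch of the two call forms as they appear in these tests; the include path, namespace, shape values and the cleanup via delete are assumptions for illustration, not part of this commit.

/* sketch of the renamed constructor calls seen in the test diffs;
   include path, namespace and shape values are illustrative only */
#include "../XTensor.h"

using namespace nts;

void NewTensorV2Sketch()
{
    int order = 2;
    int dimSize[2] = {2, 4};    /* hypothetical 2 x 4 shape */

    /* CPU form: previously NewTensor(order, dimSize) */
    XTensor * s = NewTensorV2(order, dimSize);

    /* GPU form used throughout the tests:
       data type X_FLOAT, dense ratio 1.0F, device id 0 */
    XTensor * sGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);

    delete s;
    delete sGPU;
}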