Commit 98db6f24 by xuchen

update document

parent f7f33b29
...@@ -30,7 +30,7 @@ NiuTrans.Tensor是小牛开源项目所开发的一个工具包,提供了完 ...@@ -30,7 +30,7 @@ NiuTrans.Tensor是小牛开源项目所开发的一个工具包,提供了完
* 通过function里的FHeader.h可以调用一些激活函数 * 通过function里的FHeader.h可以调用一些激活函数
* 在所创建项目中使用命名空间nts * 在所创建项目中使用命名空间nts
此外,一些必须的环境配置方法请参考 [NiuTrans.Tensor环境配置](http://47.105.50.196/NiuTrans/NiuTrans.Tensor/blob/linye/doc/Configuration.md) 此外,一些必须的环境配置方法请参考 [NiuTrans.Tensor环境配置](http://47.105.50.196/NiuTrans/NiuTrans.Tensor/blob/master/doc/Configuration.md)
### Linux ### Linux
...@@ -42,7 +42,7 @@ NiuTrans.Tensor是小牛开源项目所开发的一个工具包,提供了完 ...@@ -42,7 +42,7 @@ NiuTrans.Tensor是小牛开源项目所开发的一个工具包,提供了完
## 开发团队 ## 开发团队
NiuTrans.Tensor张量计算库由小牛团队开发,成员来自东北大学自然语言处理实验室、小牛翻译、小牛雅智,致力于为深度学习相关研究及工业系统的开发提供完整的张量定义及计算功能。 NiuTrans.Tensor张量计算库由东北大学自然语言处理实验室小牛开源团队开发,致力于为深度学习相关研究及工业系统的开发提供完整的张量定义及计算功能。
## 更新版本 ## 更新版本
......
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-30
*/
#include "TTmp.h"
#include "../XTensor.h"
#include "../../xc/ultility.h"
#include "../../xc/myCode.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
/* 
case 1: compare the CPU (and, when available, GPU) Softmax result against
a reference result read from a raw dump file. Returns true only when the
computed tensor matches the dumped reference within tolerance; returns
false if a dump file cannot be opened or the data differs.
NOTE(review): input paths are hard-coded developer paths ("V:/temp/...") —
this is temporary debugging code, not a portable test.
*/
bool TestTmp1()
{
    /* input tensor, read from a raw dump produced elsewhere */
    XTensor a;
    XTensor b;
    InitTensor4D(&a, 8, 32, 106, 106, X_FLOAT, -1, NULL);

    FILE * fReadOrigin = fopen("V:/temp/input.dump", "rb");
    if (fReadOrigin == NULL) {
        printf("cannot open V:/temp/input.dump\n");
        return false;
    }
    a.Read(fReadOrigin, "a-plus-bias");
    fclose(fReadOrigin);

    b = Softmax(a, 3);

    /* expected result tensor, read from the companion dump file */
    XTensor c;
    InitTensor4D(&c, 8, 32, 106, 106, X_FLOAT, -1, NULL);

    FILE * fReadResult = fopen("V:/temp/input.dump.result", "rb");
    if (fReadResult == NULL) {
        printf("cannot open V:/temp/input.dump.result\n");
        return false;
    }
    c.Read(fReadResult, "");
    fclose(fReadResult);

    printf("\n\nThis is CPU!\n");
    b.Dump(stderr, "b", 100);
    printf("\n\n");
    c.Dump(stderr, "c", 100);

    /* element-wise comparison with a tight CPU tolerance */
    bool cpuTest = b.CheckData(c.data, b.unitNum, 1e-6F);
    if(cpuTest == true)
        printf("CPU Yeah!");
    else
        printf("CPU ops..");

#ifdef USE_CUDA
    /* repeat the check on GPU device 0 */
    XTensor aGPU;
    XTensor bGPU;
    InitTensor4D(&aGPU, 8, 32, 106, 106, X_FLOAT, 0, NULL);
    InitTensor4D(&bGPU, 8, 32, 106, 106, X_FLOAT, 0, NULL);

    fReadOrigin = fopen("V:/temp/input.dump", "rb");
    if (fReadOrigin == NULL) {
        printf("cannot open V:/temp/input.dump\n");
        return false;
    }
    aGPU.Read(fReadOrigin, "a-plus-bias");
    fclose(fReadOrigin);

    /* in-place form; the functional form would be bGPU = Softmax(aGPU, 3) */
    _Softmax(&aGPU, &bGPU, 3);

    printf("\n\nThis is GPU\n");
    bGPU.Dump(stderr, "bGPU", 100);

    /* looser tolerance for the GPU path (float accumulation differences) */
    bool gpuTest = bGPU.CheckData(c.data, bGPU.unitNum, 1e-4F);
    if(gpuTest == true)
        printf("GPU Yeah!");
    else
        printf("GPU ops..");

    return cpuTest && gpuTest;
#else
    return cpuTest;
#endif // USE_CUDA
}
/* 
case 2: read a dumped tensor and compare it against a freshly initialized
tensor of the same shape. Returns the comparison result; returns false if
the dump file cannot be opened.
NOTE(review): 'a' is only initialized, never filled from the (commented-out)
gold file — the comparison presumably runs against uninitialized data;
confirm intent before relying on this case.
*/
bool TestTmp2()
{
    XTensor a;
    XTensor b;
    InitTensor4D(&a, 8, 32, 106, 106, X_FLOAT, -1, NULL);
    InitTensor4D(&b, 8, 32, 106, 106, X_FLOAT, -1, NULL);

    //FILE * fReadResultGold = fopen("V:/temp/input.dump.gold", "rb");
    //a.Read(fReadResultGold, "input");
    //fclose(fReadResultGold);

    FILE * fReadResult = fopen("V:/temp/input.dump", "rb");
    if (fReadResult == NULL) {
        printf("cannot open V:/temp/input.dump\n");
        return false;
    }
    b.Read(fReadResult, "a-plus-bias");
    fclose(fReadResult);

    ShowData(&b, "");

    /* element-wise comparison with tolerance 1e-3 */
    bool flag = CheckTensorData(a, b, 1e-3F);
    if (flag)
        printf("yeah");
    else
        printf("ops.");

    return flag;
}
/* other cases */
/*
TODO!!
*/
/* test for Tmp Function */
/* driver for the temporary test cases: runs each enabled case,
   reports per-case and overall pass/fail, and returns the overall flag */
bool TestTmp()
{
    XPRINT(0, stdout, "[TEST Temp] temporary test\n");
    bool allPassed = true;
    bool caseOK = true;

    ///* case 1 test */
    //caseOK = TestTmp1();
    //if (caseOK)
    //    XPRINT(0, stdout, ">> case 1 passed!\n");
    //else {
    //    allPassed = false;
    //    XPRINT(0, stdout, ">> case 1 failed!\n");
    //}

    /* case 2 test */
    caseOK = TestTmp2();
    if (caseOK)
        XPRINT(0, stdout, ">> case 2 passed!\n");
    else {
        allPassed = false;
        XPRINT(0, stdout, ">> case 2 failed!\n");
    }

    /* other cases test */
    /*
    TODO!!
    */

    /* overall summary */
    if (allPassed)
        XPRINT(0, stdout, ">> All Passed!\n");
    else
        XPRINT(0, stdout, ">> Failed!\n");

    XPRINT(0, stdout, "\n");
    return allPassed;
}
} // namespace nts(NiuTrans.Tensor)
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-08-13
*/
#ifndef __TEST_TMP_H__
#define __TEST_TMP_H__
#include "../core/CHeader.h"
#include "../function/FHeader.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
bool TestTmp();
} // namespace nts(NiuTrans.Tensor)
#endif // __TEST_TMP_H__
...@@ -29,8 +29,6 @@ bool Test() ...@@ -29,8 +29,6 @@ bool Test()
bool wrong = false; bool wrong = false;
XPRINT(0, stdout, "Testing the XTensor utilites ... \n\n"); XPRINT(0, stdout, "Testing the XTensor utilites ... \n\n");
//wrong = !TestTmp() || wrong;
wrong = !TestAbsolute() || wrong; wrong = !TestAbsolute() || wrong;
wrong = !TestClip() || wrong; wrong = !TestClip() || wrong;
wrong = !TestConcatenate() || wrong; wrong = !TestConcatenate() || wrong;
......
...@@ -22,8 +22,6 @@ ...@@ -22,8 +22,6 @@
#ifndef __TEST_H__ #ifndef __TEST_H__
#define __TEST_H__ #define __TEST_H__
#include "TTmp.h"
#include "TAbsolute.h" #include "TAbsolute.h"
#include "TClip.h" #include "TClip.h"
#include "TConcatenate.h" #include "TConcatenate.h"
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论