/* TMultiply.cpp */
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*   http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-06-15
*/

#include "TMultiply.h"

namespace nts { // namespace nts(NiuTrans.Tensor)

/* 
case 1: element-wise product of two tensors
c(i) = a(i)*b(i) + \alpha * c(i)
In this case, (2, 2)  (2, 2) -> (2, 2), leadingDim=0, alpha=0.
xiaotong committed
30
*/
31
bool TestMultiply1()
xiaotong committed
32
{
liyinqiao committed
33
	/* a source tensor of size (2, 2) */
xiaotong committed
34 35 36 37 38 39 40 41 42
	int sOrder1 = 2;
	int * sDimSize1 = new int[sOrder1];
	sDimSize1[0] = 2;
	sDimSize1[1] = 2;

	int sUnitNum1 = 1;
	for (int i = 0; i < sOrder1; i++)
		sUnitNum1 *= sDimSize1[i];

liyinqiao committed
43
	/* a source tensor of size (2, 2) */
xiaotong committed
44 45 46 47 48 49 50 51 52
	int sOrder2 = 2;
	int * sDimSize2 = new int[sOrder2];
	sDimSize2[0] = 2;
	sDimSize2[1] = 2;

	int sUnitNum2 = 1;
	for (int i = 0; i < sOrder2; i++)
		sUnitNum2 *= sDimSize2[i];

liyinqiao committed
53
	/* a target tensor of size (2, 2) */
xiaotong committed
54 55 56 57 58 59 60 61 62
	int tOrder = 2;
	int * tDimSize = new int[tOrder];
	tDimSize[0] = 2;
	tDimSize[1] = 2;

	int tUnitNum = 1;
	for (int i = 0; i < tOrder; i++)
		tUnitNum *= tDimSize[i];

liyinqiao committed
63 64 65 66 67 68
	DTYPE sData1[2][2] = { {0.0F, 1.0F},
	                       {2.0F, 3.0F} };
	DTYPE sData2[2][2] = { {0.0F, 1.0F},
	                       {2.0F, 3.0F} };
	DTYPE answer[2][2] = { {0.0F, 1.0F},
	                       {4.0F, 9.0F} };
xiaotong committed
69 70 71 72 73 74 75 76

	/* CPU test */
	bool cpuTest = true;

	/* create tensors */
	XTensor * s1 = NewTensor(sOrder1, sDimSize1);
	XTensor * s2 = NewTensor(sOrder2, sDimSize2);
	XTensor * t = NewTensor(tOrder, tDimSize);
77 78
    XTensor * tMe = NewTensor(tOrder, tDimSize);
    XTensor tUser;
xiaotong committed
79 80 81

	/* initialize variables */
	s1->SetData(sData1, sUnitNum1);
82
	tMe->SetData(sData1, sUnitNum1);
xiaotong committed
83 84 85
	s2->SetData(sData2, sUnitNum2);
	t->SetZeroAll();

86
	/* call Multiply function */
87
	_Multiply(s1, s2, t, 0, 0);
88
	_MultiplyMe(tMe, s2, 0, 0);
89
    tUser = Multiply(*s1, *s2, 0);
xiaotong committed
90 91

	/* check results */
92 93 94
	cpuTest = t->CheckData(answer, tUnitNum) && 
              tMe->CheckData(answer, tUnitNum) && 
              tUser.CheckData(answer, tUnitNum);
xiaotong committed
95 96 97 98 99 100 101 102 103

#ifdef USE_CUDA
	/* GPU test */
	bool gpuTest = true;

	/* create tensor */
	XTensor * sGPU1 = NewTensor(sOrder1, sDimSize1, X_FLOAT, 1.0F, 0);
	XTensor * sGPU2 = NewTensor(sOrder2, sDimSize2, X_FLOAT, 1.0F, 0);
	XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
104 105
    XTensor * tMeGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
    XTensor tUserGPU;
xiaotong committed
106 107 108

	/* Initialize variables */
	sGPU1->SetData(sData1, sUnitNum1);
109
	tMeGPU->SetData(sData1, sUnitNum1);
xiaotong committed
110 111 112
	sGPU2->SetData(sData2, sUnitNum2);
	tGPU->SetZeroAll();

113
	/* call Multiply function */
114
	_Multiply(sGPU1, sGPU2, tGPU, 0, 0);
115
	_MultiplyMe(tMeGPU, sGPU2, 0, 0);
116
    tUserGPU = Multiply(*sGPU1, *sGPU2, 0);
xiaotong committed
117 118

	/* check results */
119 120 121
	gpuTest = tGPU->CheckData(answer, tUnitNum) && 
              tMeGPU->CheckData(answer, tUnitNum) && 
              tUserGPU.CheckData(answer, tUnitNum);
xiaotong committed
122 123

	/* destroy variables */
liyinqiao committed
124 125 126
    delete s1;
    delete s2;
    delete t;
127
    delete tMe;
liyinqiao committed
128 129 130
    delete sGPU1;
    delete sGPU2;
    delete tGPU;
131
    delete tMeGPU;
liyinqiao committed
132 133 134
    delete[] sDimSize1;
    delete[] sDimSize2;
    delete[] tDimSize;
xiaotong committed
135 136 137

	return cpuTest && gpuTest;
#else
liyinqiao committed
138 139 140 141
    /* destroy variables */
    delete s1;
    delete s2;
    delete t;
142
    delete tMe;
liyinqiao committed
143 144 145
    delete[] sDimSize1;
    delete[] sDimSize2;
    delete[] tDimSize;
xiaotong committed
146 147 148 149 150 151 152 153 154 155

	return cpuTest;
#endif // USE_CUDA
}

/* other cases */
/*
TODO!!
*/

156
/* test for Multiply Function */
157
bool TestMultiply()
xiaotong committed
158
{
159
	XPRINT(0, stdout, "[TEST Multiply] element-wise product of two tensors \n");
xiaotong committed
160 161 162
	bool returnFlag = true, caseFlag = true;

	/* case 1 test */
163
	caseFlag = TestMultiply1();
xiaotong committed
164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188

	if (!caseFlag) {
		returnFlag = false;
		XPRINT(0, stdout, ">> case 1 failed!\n");
	}
	else
		XPRINT(0, stdout, ">> case 1 passed!\n");

	/* other cases test */
	/*
	TODO!!
	*/

	if (returnFlag) {
		XPRINT(0, stdout, ">> All Passed!\n");
	}
	else
		XPRINT(0, stdout, ">> Failed!\n");

	XPRINT(0, stdout, "\n");

	return returnFlag;
}

} // namespace nts(NiuTrans.Tensor)