/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*   http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*
* $Created by: LI Yinqiao (li.yin.qiao.2012@hotmail.com) 2018-04-30
*/

#include "TSort.h"

namespace nts { // namespace nts(NiuTrans.Tensor)

/* case 1: sort the tensor along a given dimension (dim = 0) */
bool TestSort1()
{
    /* a tensor of size (2, 4) */
    int order = 2;
    int * dimSize = new int[order];
    dimSize[0] = 2;
    dimSize[1] = 4;

    int unitNum = 1;
    for (int i = 0; i < order; i++)
        unitNum *= dimSize[i];

    DTYPE aData[2][4] = { {0.0F, 1.0F, 2.0F, 3.0F},
                          {4.0F, 5.0F, 6.0F, 7.0F} };
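    /* the expected result: a descending sort along dimension 0,
       so for each column the larger element comes first */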
    DTYPE answer[2][4] = { {4.0F, 5.0F, 6.0F, 7.0F},
                           {0.0F, 1.0F, 2.0F, 3.0F} };

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
    XTensor * a = NewTensor(order, dimSize);
    XTensor * b = NewTensor(order, dimSize);
    XTensor * aMe = NewTensor(order, dimSize);
    XTensor * index = NewTensor(order, dimSize, X_INT);
    XTensor bUser(order, dimSize, X_FLOAT, 1.0F, -1, NULL);

    /* initialize variables */
    a->SetData(aData, unitNum);
    aMe->SetData(aData, unitNum);
    index->SetZeroAll();

    /* call Sort function */
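    /* three calling conventions are exercised here: _Sort writes the sorted
       values to b, _SortMe sorts aMe in place, and Sort fills the user-provided
       tensor bUser; index is expected to receive the sorted indices */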
    _Sort(a, b, index, 0);
    _SortMe(aMe, index, 0);
    Sort(*a, bUser, *index, 0);

    /* check results */
    cpuTest = b->CheckData(answer, unitNum) && aMe->CheckData(answer, unitNum) && bUser.CheckData(answer, unitNum);

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensors */
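    /* note: the trailing arguments 1.0F and 0 appear to be the dense ratio
       and the device id (0 selects the first GPU) */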
    XTensor * aGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * bGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * aMeGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * indexGPU = NewTensor(order, dimSize, X_INT, 1.0F, 0);
    XTensor bUserGPU(order, dimSize, X_FLOAT, 1.0F, 0, NULL);

    /* initialize variables */
    aGPU->SetData(aData, unitNum);
    aMeGPU->SetData(aData, unitNum);
    indexGPU->SetZeroAll();

    /* call Sort function */
    _Sort(aGPU, bGPU, indexGPU, 0);
    _SortMe(aMeGPU, indexGPU, 0);
    Sort(*aGPU, bUserGPU, *indexGPU, 0);

    /* check results */
    gpuTest = bGPU->CheckData(answer, unitNum) && aMeGPU->CheckData(answer, unitNum) && bUserGPU.CheckData(answer, unitNum);

    /* destroy variables */
    delete a;
    delete b;
    delete aMe;
    delete index;
    delete aGPU;
    delete bGPU;
    delete aMeGPU;
    delete indexGPU;
    delete[] dimSize;

    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete a;
    delete b;
    delete aMe;
    delete index;
    delete[] dimSize;

    return cpuTest;
#endif // USE_CUDA
}

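/* case 2: sort the tensor along a given dimension (dim = 1) */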
bool TestSort2()
{
    /* a tensor of size (2, 4) */
    int order = 2;
    int * dimSize = new int[order];
    dimSize[0] = 2;
    dimSize[1] = 4;

    int unitNum = 1;
    for (int i = 0; i < order; i++)
        unitNum *= dimSize[i];

    DTYPE aData[2][4] = { {0.0F, 1.0F, 2.0F, 3.0F},
                          {4.0F, 5.0F, 6.0F, 7.0F} };
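    /* the expected result: a descending sort along dimension 1,
       so each row ends up in descending order */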
    DTYPE answer[2][4] = { {3.0F, 2.0F, 1.0F, 0.0F},
                           {7.0F, 6.0F, 5.0F, 4.0F} };

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
    XTensor * a = NewTensor(order, dimSize);
    XTensor * b = NewTensor(order, dimSize);
    XTensor * aMe = NewTensor(order, dimSize);
    XTensor * index = NewTensor(order, dimSize, X_INT);
    XTensor bUser(order, dimSize, X_FLOAT, 1.0F, -1, NULL);

    /* initialize variables */
    a->SetData(aData, unitNum);
    aMe->SetData(aData, unitNum);
    index->SetZeroAll();
    
    /* call Sort function */
    _Sort(a, b, index, 1);
    _SortMe(aMe, index, 1);
    Sort(*a, bUser, *index, 1);

    /* check results */
    cpuTest = b->CheckData(answer, unitNum) && aMe->CheckData(answer, unitNum) && bUser.CheckData(answer, unitNum);

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensors */
    XTensor * aGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * bGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * aMeGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * indexGPU = NewTensor(order, dimSize, X_INT, 1.0F, 0);
    XTensor bUserGPU(order, dimSize, X_FLOAT, 1.0F, 0, NULL);

    /* initialize variables */
    aGPU->SetData(aData, unitNum);
    aMeGPU->SetData(aData, unitNum);
    indexGPU->SetZeroAll();

    /* call Sort function */
    _Sort(aGPU, bGPU, indexGPU, 1);
    _SortMe(aMeGPU, indexGPU, 1);
    Sort(*aGPU, bUserGPU, *indexGPU, 1);

    /* check results */
    gpuTest = bGPU->CheckData(answer, unitNum) && aMeGPU->CheckData(answer, unitNum) && bUserGPU.CheckData(answer, unitNum);

    /* destroy variables */
    delete a;
    delete b;
    delete aMe;
    delete index;
    delete aGPU;
    delete bGPU;
    delete aMeGPU;
    delete indexGPU;
    delete[] dimSize;

    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete a;
    delete b;
    delete aMe;
    delete index;
    delete[] dimSize;

    return cpuTest;
#endif // USE_CUDA
}

/* other cases */
/*
TODO!!
*/

/* test for the Sort function */
bool TestSort()
{
    XPRINT(0, stdout, "[TEST SORT] sort the tensor along a given dimension \n");
    bool returnFlag = true, caseFlag = true;

    /* case 1 test */
    caseFlag = TestSort1();
    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 1 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 1 passed!\n");

    /* case 2 test */
    caseFlag = TestSort2();
    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 2 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 2 passed!\n");

    /* other cases test */
    /*
    TODO!!
    */

    if (returnFlag) {
        XPRINT(0, stdout, ">> All Passed!\n");
    }
    else
        XPRINT(0, stdout, ">> Failed!\n");

    XPRINT(0, stdout, "\n");

    return returnFlag;
}

} // namespace nts(NiuTrans.Tensor)