/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*   http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*
* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-24
*/

#include "../../XTensor.h"
#include "CopyBlocksInGrid.h"
#include "../../XUtility.h"
#include "CopyBlocksInGrid.cuh"

namespace nts { // namespace nts(NiuTrans.Tensor)

/*
copy a number of blocks in grid
>> source - pointer to the source data array
>> blockSize - size of a data block
>> blockNum - number of the blocks (in a grid)
>> gridNum - number of the grids.
Note that a grid may have a number of blocks
>> target - pointer to the target data array
>> index - source block id for each target block
>> myMem - the memory pool
>> isIndexOnDev - indicates whether the index is on the device already
*/
void _CopyBlocksInGrid(void * source, int blockSize, int blockNum, int gridNum, void * target,
42
                       int * index, int unitSize, bool isIndexOnDev, XMem * myMem)
xiaotong committed
43 44 45 46 47 48 49
{
    CheckNTErrors((unitSize == sizeof(int)), "TODO!");

    if (myMem != NULL && myMem->devID >= 0) {
#ifdef USE_CUDA
        int * indexGPU = index;
        if (!isIndexOnDev) {
50
            myMem->LockBuf();
xiaotong committed
51 52 53 54
            indexGPU = (int*)myMem->AllocBuf(myMem->devID, blockNum * gridNum * sizeof(int));
            XMemCopy(indexGPU, myMem->devID, index, -1, blockNum * gridNum * sizeof(int));
        }

55
        _CudaCopyBlocksInGrid(source, blockSize, blockNum, gridNum, target, indexGPU, unitSize, myMem);
xiaotong committed
56

57
        if (!isIndexOnDev) {
xiaotong committed
58
            myMem->ReleaseBuf(myMem->devID, blockNum * gridNum * sizeof(int));
59 60
            myMem->UnlockBuf();
        }
xiaotong committed
61 62 63 64
#else
        ShowNTErrors("Plesae specify USE_CUDA and recompile the code!");
#endif
    }
liyinqiao committed
65
    else if(myMem != NULL){
xiaotong committed
66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83
        void * buf = XMemAlloc(myMem->devID, blockSize * blockNum * unitSize);
        for (int k = 0; k < gridNum; k++) {
            int offset = k * blockSize * blockNum;
            for (int i = 0; i < blockNum; i++) {
                int b = index[k * blockNum + i];
                if (b >= 0 && b < blockNum) {
                    int * t = (int*)buf + blockSize * i;
                    int * s = (int*)source + offset + blockSize * b;
                    for (int j = 0; j < blockSize; j++)
                        t[j] = s[j];
                }
            }
            XMemCopy((int*)target + offset, myMem->devID,
                buf, myMem->devID,
                blockSize * blockNum * unitSize);
        }
        XMemFree(myMem->devID, buf);
    }
liyinqiao committed
84 85 86
    else {
        ShowNTErrors("TODO!");
    }
xiaotong committed
87 88 89
}

} // namespace nts(NiuTrans.Tensor)