/* NiuTrans.Tensor - an open-source tensor library
 * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
 * All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-11-27
 */

#include "Gather.cuh"
#include "CopyBlocksSelected.cuh"
#include "../../XDevice.h"
#include "../../XUtility.h"

namespace nts { // namespace nts(NiuTrans.Tensor)

#ifdef USE_CUDA

/*
gather indexed sub-tensors(cuda version)

>> sData - the data pointer of the source tensor
>> tData - the data pointer of the target tensor
>> sIndex - the index of the source tensor
>> indexSize - the size of sIndex (the number of sub-tensors to gather)
>> stride - stride of a data block
*/
__global__
void KernelGather(DTYPE * sData, DTYPE * tData, int * sIndex, int indexSize, int stride)
{
    __shared__ DTYPE * sp[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ DTYPE * tp[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    /* block id */
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    /* offset in each block */
    int offset = blockDim.y * blockIdx.y + threadIdx.y;

    if(i >= indexSize || offset >= stride)
        return;

    if(threadIdx.y == 0){
        sp[threadIdx.x] = sData + sIndex[i] * stride;
        tp[threadIdx.x] = tData + i * stride;
    }

    __syncthreads();

    DTYPE * s = sp[threadIdx.x];
    DTYPE * t = tp[threadIdx.x];

    t[offset] = s[offset];
}
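
/*
An illustrative example of the kernel above (a hedged note, not part of the
original source): with stride = 3 and sIndex = {2, 0}, thread column i = 0
copies sData[6..8] into tData[0..2] and thread column i = 1 copies
sData[0..2] into tData[3..5], so the target holds source blocks 2 and 0 in
the order given by the index.
*/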

/*
gather indexed sub-tensors(cuda version)

>> sData - the data pointer of the source tensor
>> tData - the data pointer of the target tensor
>> sIndex - the index of the source tensor
>> stride - stride of a data block (product of the dimensions after the gathering dimension)
>> strideNum - size of the gathering dimension in the index/target tensor
>> blockNum - number of data blocks (product of the dimensions before the gathering dimension)
>> srcStrideNum - size of the gathering dimension in the source tensor
*/
__global__
void KernelGather(DTYPE * sData, DTYPE * tData, int * sIndex, int stride, int strideNum, int blockNum, int srcStrideNum)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int idy = blockDim.y * blockIdx.y + threadIdx.y;
    int blockIndex = idy / stride;
    int offsetInBlock = idy % stride;

    int size = stride * strideNum * blockNum;  

#pragma unroll
    for (int i = idx * stride + stride * strideNum * blockIndex + offsetInBlock;
        i < stride * strideNum * blockIndex + offsetInBlock + stride * strideNum && i < size;
        i += stride * blockDim.x) {
        tData[i] = sData[sIndex[i] * stride + stride * srcStrideNum * blockIndex + offsetInBlock];
    }
}
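
/*
An illustrative example of the kernel above (a hedged note, not part of the
original source): with stride = 1, strideNum = 2, blockNum = 2,
srcStrideNum = 3 and sIndex = {2, 0, 1, 1}, the copies performed are
    tData[0] = sData[2], tData[1] = sData[0]   (block 0)
    tData[2] = sData[4], tData[3] = sData[4]   (block 1, source offset = stride * srcStrideNum * blockIndex = 3)
*/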

/*
gather indexed sub-tensors(cuda version)

>> s - the source tensor
>> t - the target tensor
>> srcIndex - the tensor to save the index of the source tensor
*/
void _CudaGather(const XTensor * s, XTensor * t, XTensor * srcIndex)
{
    int devID = s->devID;
    XMem * mem = s->mem;

    int stride = s->GetDim(1);
    int indexSize = srcIndex->unitNum;

    int cudaGrids[3];
    int cudaBlocks[3];

    int devIDBackup;
    ProtectCudaDev(devID, devIDBackup);

    GDevs.GetCudaThread2D(devID, indexSize, stride, MAX_INT, cudaGrids, cudaBlocks);

    dim3 blocks(cudaGrids[0], cudaGrids[1]);
    dim3 threads(cudaBlocks[0], cudaBlocks[1]);

    DTYPE * sData = (DTYPE*)s->data;
    DTYPE * tData = (DTYPE*)t->data;

    int * sIndex = NULL;
    
    if (srcIndex->devID < 0) {
        int * sIndexData = (int*)srcIndex->data;
        for (int i = 0; i < indexSize; i++) {
            int srcIndexValue = sIndexData[i] * stride;
            CheckNTErrors(srcIndexValue < s->unitNum, "Wrong index!");
        }

        sIndex = mem != NULL ?
                  (int*)mem->AllocBuf(mem->devID, sizeof(int) * indexSize) :
                  (int*)XMemAlloc(devID, sizeof(int) * indexSize);
        XMemCopy(sIndex, devID, srcIndex->data, -1, sizeof(int) * indexSize);
    }
    else {
        int * sIndexData = new int[indexSize];
        XMemCopy(sIndexData, -1, srcIndex->data, srcIndex->devID, sizeof(int) * indexSize);
        for (int i = 0; i < indexSize; i++) {
            int srcIndexValue = sIndexData[i] * stride;
            CheckNTErrors(srcIndexValue < s->unitNum, "Wrong index!");
        }

        sIndex = (int *)srcIndex->data;

        delete[] sIndexData;
    }

    KernelGather<<<blocks, threads>>>(sData, tData, sIndex, indexSize, stride);

    if (srcIndex->devID < 0) {
        if(mem != NULL)
            mem->ReleaseBuf(mem->devID, sizeof(int) * indexSize);
        else
            XMemFree(devID, sIndex);
    }

    BacktoCudaDev(devID, devIDBackup);
}
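
/*
A minimal, self-contained usage sketch of the flat gather path above (a hedged
illustration, not part of the original library): it launches KernelGather
directly on raw device buffers instead of going through XTensor, mirroring
what _CudaGather does once the index buffer is ready on the device. The
function name GatherSketch and the hard-coded sizes are hypothetical.
*/
static void GatherSketch()
{
    const int indexSize = 2;                 /* number of sub-tensors (rows) to gather */
    const int stride = 4;                    /* number of elements per sub-tensor */

    DTYPE hostSrc[3 * 4];                    /* a 3 x 4 source matrix: 0, 1, ..., 11 */
    for (int i = 0; i < 3 * 4; i++)
        hostSrc[i] = (DTYPE)i;
    int hostIdx[indexSize] = {2, 0};         /* gather row 2, then row 0 */

    DTYPE * devSrc = NULL;
    DTYPE * devTgt = NULL;
    int * devIdx = NULL;
    cudaMalloc((void**)&devSrc, sizeof(hostSrc));
    cudaMalloc((void**)&devTgt, sizeof(DTYPE) * indexSize * stride);
    cudaMalloc((void**)&devIdx, sizeof(hostIdx));
    cudaMemcpy(devSrc, hostSrc, sizeof(hostSrc), cudaMemcpyHostToDevice);
    cudaMemcpy(devIdx, hostIdx, sizeof(hostIdx), cudaMemcpyHostToDevice);

    /* one thread per (row, offset) pair, as in _CudaGather */
    dim3 blocks(1, 1);
    dim3 threads(indexSize, stride);
    KernelGather<<<blocks, threads>>>(devSrc, devTgt, devIdx, indexSize, stride);
    cudaDeviceSynchronize();

    /* devTgt now holds {8, 9, 10, 11, 0, 1, 2, 3}, i.e. source rows 2 and 0 */
    cudaFree(devSrc);
    cudaFree(devTgt);
    cudaFree(devIdx);
}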

/*
gather indexed sub-tensors(cuda version)

>> s - the source tensor
>> t - the target tensor
>> srcIndex - the tensor to save the index of the source tensor
>> dim - the leading dimension to define "sub-tensors"
*/
void _CudaGather(const XTensor * s, XTensor * t, XTensor * srcIndex, int dim)
{
    int devID = srcIndex->devID;
    XMem * mem = s->mem;

    int stride = 1;
    int blockNum = 1;
    int indexSize = srcIndex->unitNum;
    int strideNum = srcIndex->dimSize[dim];
    int srcStrideNum = s->dimSize[dim];
    for (int i = 0; i < dim; i++)
        blockNum *= srcIndex->dimSize[i];
    for (int i = dim + 1; i < srcIndex->order; i++)
        stride *= srcIndex->dimSize[i];

    int * sIndex = NULL;
    if (srcIndex->devID < 0) {
        int * sIndexData = (int*)srcIndex->data;
        for (int i = 0; i < indexSize; i++) {
            int srcIndexValue = sIndexData[i] * stride;
            CheckNTErrors(srcIndexValue < s->unitNum, "Wrong index!");
        }

        sIndex = mem != NULL ?
                  (int*)mem->AllocBuf(mem->devID, sizeof(int) * indexSize) :
                  (int*)XMemAlloc(devID, sizeof(int) * indexSize);
        XMemCopy(sIndex, devID, srcIndex->data, -1, sizeof(int) * indexSize);
    }
    else {
        int * sIndexData = new int[indexSize];
        XMemCopy(sIndexData, -1, srcIndex->data, srcIndex->devID, sizeof(int) * indexSize);
        for (int i = 0; i < indexSize; i++) {
            int srcIndexValue = sIndexData[i] * stride;
            CheckNTErrors(srcIndexValue < s->unitNum, "Wrong index!");
        }

        sIndex = (int *)srcIndex->data;
        delete[] sIndexData;
    }

    int cudaGrids[3];
    int cudaBlocks[3];
    GDevs.GetCudaThread2D(devID, max(32, strideNum), stride*blockNum, MAX_INT, cudaGrids, cudaBlocks);
    KernelGather<<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
                ((DTYPE*)s->data, (DTYPE*)t->data, sIndex, stride, strideNum, blockNum, srcStrideNum);

    /* release the temporary index buffer (mirroring the cleanup in the flat-index version above) */
    if (srcIndex->devID < 0) {
        if (mem != NULL)
            mem->ReleaseBuf(mem->devID, sizeof(int) * indexSize);
        else
            XMemFree(devID, sIndex);
    }
217
}
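
/*
An illustrative example of the wrapper above (a hedged note, not part of the
original source): with a source tensor s of shape (4, 3), an index tensor
srcIndex of shape (2, 3) and dim = 0, the target t must have shape (2, 3) and
the call computes t[i][j] = s[srcIndex[i][j]][j].
*/
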
#endif // USE_CUDA

} // namespace nts(NiuTrans.Tensor)