/* NiuTrans.Tensor - an open-source tensor library
 * Copyright (C) 2017, Natural Language Processing Lab, Northestern University. 
 * All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * 
 * the tensor class
 *
 * $Created by: XIAO Tong (xiaotong@mail.neu.edu.cn) 2017-07-31
 * I'm working while most of the students are enjoying their holidays :(
 * $Updated by: LI Yinqiao (li.yin.qiao.2012@hotmail.com) 2017-11-18 bug fixes
 *
 */

#ifndef __XTENSOR_H__
#define __XTENSOR_H__

#include "XGlobal.h"
#include "XMem.h"
#include "XPRunner.h"
#include "XStream.h"
#include "XHeap.h"
#include "XList.h"
#include "XDataType.h"
#include "XMem.h"
#include "XLink.h"

/* the nts (NiuTrans.Tensor) namespace */
namespace nts{

/* cross reference */
struct XLink;

/* define the maximum number of dimensions in a tensor */
#define MAX_TENSOR_DIM_NUM 8

#define USE_BATCHED_STRIDED_MAT_MUL
#define MIN_TENSOR_SPLIT_NUM 0
#define MIN_TENSOR_SPLIT_LIST_NUM 1024
#define MIN_TENSOR_CAT_NUM 8

/* computation flags */
#define UNSAFE_BUT_FAST_MEM
#define FAST_MATRIX

/* XTensor is a class to do everything a tensor can do :) */
struct XTensor
{
61 62 63 64
public:
    /* id */
    int id;

xiaotong committed
65 66 67
    /* memory pool */
    XMem * mem;

68 69 70
    /* signature of the memory pool */
    MTYPE signature;

xiaotong committed
71 72 73 74 75 76
    /* data array to keep the elements */
    void * data;

    /* copy of data on the host memory. It is only activated 
       when the matrix is operated on GPUs */
    void * dataHost;
77 78 79 80
    
    /* a pointer to data (i.e., a pointer to the address of "data".
       This is for reset "data" when XTensor is used as a const variable. */
    void ** dataP;
xiaotong committed
81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140

    /* 
    device id 
    <0:  CPU memory
    >=0: GPU device ID
    */
    int devID;

    /* order of the tensor. e.g., a matrix (a 2-dimensional array) 
       is a 2nd-order tensor */
    int order;

    /* size of each dimension */
    int dimSize[MAX_TENSOR_DIM_NUM];

    /* size of each dimension by Reversed Dimension Indexing (RDI) Mode */
    int dimSizeRDI[MAX_TENSOR_DIM_NUM];

    /* data unit - data type for every cell */
    TENSOR_DATA_TYPE dataType;

    /* size of matrix unit, e.g., sizeof(int) */
    int unitSize;

    /* number of units */
    int unitNum;

    /*
    if it is a sparse matrix
    dense matrix:  there are n * m entries - i.e.,
                   the size of "data" is n * m
    sparse matrix: number of entries depends on how
                   many entries are non-zero
    */
    bool isSparse;

    /* nubmer of non-zero items in a sparse matrix */
    int unitNumNonZero;

    /*
    denseRatio - how dense the matrix is
    denseRatio = 1: a dense matrix
    denseRatio < 1: how often an element has a non-zero value
    */
    float denseRatio;

    /* indicates whether the data array is shared with other tensors */
    bool isShared;

    /* indicates whether the date type used in this matrix is in default type (i.e., DTYPE) */
    bool isDefaultDType;

    /* indicates whether the data is allocated in the global memory rather than a memory pool */
    bool isInGlobalMem;

    /* indicates whether the SPARSE tensor has non-zero values for all entries alone each dimension */
    bool isAllValued[MAX_TENSOR_DIM_NUM];

    /* indicates whether the tensor is initialized or not */
    bool isInit;
141 142 143

    /* indicates whether the tensor is created temporarily */
    bool isTmp;
144

145 146 147
    /* indicates whether the tensor keeps the gradient when used as model parameters */
    bool isGrad;

148 149 150
    /* indicates whether the tensor is used as paramters (or variables) */
    bool isVar;

151 152
    /* mark for traversing the gragh */
    unsigned int visitMark;
153 154 155

    /* gradient (for back-propagation) */
    XTensor * grad;
156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173
    
    /*
    the link used to form networks. Note that when we compute on tensors, we actually create a
    network where nodes are tensors and edges the connections among them. Each connection is
    a hyperedge whose head is the output tensor and tails are input tensors. E.g,
    c = a + b
    represents a network with three nodes (a, b and c) and a hyperedge that links a and b (tails) to c (head).
    Here "income" keeps which nodes (tensors) are used to form the current node (tensor).
    */
    XLink income;
    
    /* It keeps which nodes (tensors) we go to from the current node (tensor). */
    XLink outgo;

    /********************
     XTensor untilities
    ********************/
    
xiaotong committed
174 175 176 177
    /* constructor */
    XTensor();

    /* constructor */
178
    XTensor(const XTensor * reference);
xiaotong committed
179 180 181 182 183 184

    /* constructor */
    XTensor(const int myOrder, int myDevID, XMem * myMem);

    /* constructor */
    XTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType, 
185
            const float myDenseRatio, int myDevID, XMem * myMem);
xiaotong committed
186

187 188 189
    /* copy constructor */
    XTensor(const XTensor &reference);

xiaotong committed
190 191 192
    /* de-constructor */
    ~XTensor();

193 194 195
    /* initialize member variables */
    void Init();

xiaotong committed
196 197 198
    /* delete data arrays */
    void DestroyData();

199 200 201
    /* shallow copy of tensor */
    void ShallowCopy(const XTensor &tensor);

xiaotong committed
202
    /* overloading of the equal-sign */
203 204 205 206
    XTensor& operator= (const XTensor &tensor);

    /* overloading of the plus-sign */
    XTensor  operator+ (const XTensor &tensor);
207 208 209
    
    /* overloading of the plus-sign */
    XTensor  operator+ (const DTYPE shift);
210 211 212

    /* overloading of the multiply-sign */
    XTensor  operator* (const XTensor &tensor);
213 214 215
    
    /* overloading of the multiply-sign */
    XTensor  operator* (const DTYPE scale);
216

217 218
    /* overloading of the minus-sign */
    XTensor  operator- (const XTensor &tensor);
219 220 221
    
    /* overloading of the minus-sign */
    XTensor  operator- (const DTYPE shift);
222 223 224

    /* overloading of the division-sign */
    XTensor  operator/ (const XTensor &tensor);
225 226 227
    
    /* overloading of the division-sign */
    XTensor  operator/ (const DTYPE scale);
228

229 230
    /* linear transformation */
    XTensor Lin(DTYPE scale, DTYPE shift = 0);
xiaotong committed
231 232 233

    /* judge whether the two matrices are in the same type and size */
    static
234
    bool IsSameShaped(const XTensor * a, const XTensor * b);
xiaotong committed
235 236 237

    /* judge whether the three matrices are in the same type and size */
    static
238
    bool IsSameShaped(const XTensor * a, const XTensor * b, const XTensor * c);
xiaotong committed
239 240 241 242 243

    /* set the size of each dimension */
    void SetDim(int * myDimSize);

    /* get the size of a given dimension */
244
    int GetDim(const int dim) const;
xiaotong committed
245 246 247 248

    /* reshape the tensor */
    void Reshape(const int order, const int * myDimSize);

249 250 251 252 253 254
    /* reshape the tensor to a vector */
    void Reshape(const int num);

    /* reshape the tensor to a matrix */
    void Reshape(const int rowNum, const int colNum);

xiaotong committed
255
    /* get the number of items in the data array */
256
    int GetSize() const;
xiaotong committed
257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278

    /* get size of the memory used */
    int GetDataSizeInChar();

    /* get unit size in terms of "dataType" */
    int GetUnitSize(TENSOR_DATA_TYPE myDataType);

    /* a tensor with all entries of 0 */
    void SetZeroAll(XStream * stream = NULL);

    /* set the tensor with an data array */
    void SetData(const void * d, int num, int beg = 0);

    /* set the tensor items by a uniform distribution */
    void SetDataRand(DTYPE lower, DTYPE upper);

    /* set the tensor items by a normal distribution */
    void SetDataRandn(DTYPE mean, DTYPE standardDeviation);

    /* check whether the data array is the same as the answer */
    bool CheckData(const void * answer, int num, int beg = 0);

279 280
    /* check whether the data array is the same as the answer */
    bool CheckData(const void * answer, int num, float tolerance, int beg = 0);
281 282 283
    
    /* set the pointer to "data" */
    void SetDataPointer();
284

xiaotong committed
285 286 287 288 289 290 291
    /* set the cell to the ascending order along a given dimension */
    void SetAscendingOrder(int dim);

    /* get the value of a cell with the index */
    DTYPE Get(int index[], int size = -1);

    /* get the pointer to a cell */
292
    void * GetCell(int index[], int size = -1) const;
xiaotong committed
293

294
    /* get the default type value of a cell in a 1d tensor */
xiaotong committed
295 296
    DTYPE Get1D(int i);

297
    /* get the default type value of a cell in a 2d tensor */
298
    DTYPE Get2D(int ni, int mi) const;
xiaotong committed
299
    
300
    /* get the default type value of a cell in a 3d tensor */
xiaotong committed
301 302
    DTYPE Get3D(int d0, int d1, int d2);

303 304 305 306 307 308 309 310 311
    /* get the int value of a cell in a 1d tensor */
    int Get1DInt(int i);

    /* get the int value of a cell in a 2d tensor */
    int Get2DInt(int ni, int mi);
    
    /* get the int value of a cell in a 3d tensor */
    int Get3DInt(int d0, int d1, int d2);

xiaotong committed
312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335
    /* get the value of a cell in a sparse tensor */
    DTYPE GetInSparse(int i);

    /* get the key value of a tuple in a sparse tensor */
    int GetKeyInSparse(int i);

    /* set the value of a cell */
    bool Set(DTYPE value, int index[], int size = -1);

    /* set the value of a cell in a 1d tensor */
    bool Set1D(DTYPE value, int i);

    /* set the value of a cell in a 2d tensor */
    bool Set2D(DTYPE value, int ni, int mi);

    /* set the value of a cell in a 3d tensor */
    bool Set3D(DTYPE value, int d0, int d1, int d2);

    /* increase the value of a cell in a 2d */
    bool Add2D(DTYPE value, int ni, int mi);

    /* get the number of non-zero elements (in a sparse tensor) */
    int GetNonzeroSize();

336
    /* set the tensor as "temporary" */
337
    void SetTMPFlag(bool myIsTmp = true);
338

339
    /* set the tensor as "keep-gradient" */
340 341 342 343
    void SetGradFlag(bool myIsGrad = true);

    /* set the tensor as "variable" */
    void SetVarFlag(bool myIsVar = true);
344

xiaotong committed
345 346 347 348 349 350 351 352 353
    /* resize a matrix with a specified matrix size */
    bool Resize(const int myOrder, const int * myDimSize,
                const TENSOR_DATA_TYPE myDataType = DEFAULT_DTYPE,
                const float myDenseRatio = 1.0F);

    /* resize a matrix by another one */
    bool Resize(const XTensor * myTensor);

    /* binary search to find an element in a sparse matrix*/
354
    bool BinarySearch(int key, DTYPE &value, void * &position) const;
xiaotong committed
355 356

    /* dump data to a file */
357
    void Dump(FILE * file, const char * label = NULL, const int n = -1, const int beg = 0, const int verbose = 0);
xiaotong committed
358

359 360
    /* dump data to a file */
    static
361
    void Dump(const XTensor * tensor, FILE * file, const char * label = NULL, const int n = -1, const int beg = 0, const int verbose = 0);
362

xiaotong committed
363 364 365 366 367 368 369 370 371 372 373 374 375 376 377
    /* read data from a file */
    void Read(FILE * file, const char * label = NULL);

    /* flush the data to the target device */
    void FlushToMem(XMem * targetMem);

    /* allocate the memory space of the matrix (in the global memory) */
    static
    void AllocateData(XTensor * matrix, XMem * myMem = NULL, bool useBuf = false);

    /* free the memory space of the matrix (in the global memory) */
    static
    void FreeData(XTensor * matrix, XMem * myMem = NULL, bool useBuf = false);
};

378 379 380
/* we make a unique id for every tensor */
extern int tensorIDGlobal;
extern MUTEX_HANDLE tensorMutex;
381
extern XTensor NULLTensor;
382 383
extern int MakeTensorID();

xiaotong committed
384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413
/************************************************
* we define the "new and delete" functions below
*/

/* initialize a XTensor */
void InitTensor(XTensor * tensor,
                const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType = X_FLOAT,
                const float myDenseRatio = 1.0F, const int myDevID = -1, XMem * myMem = NULL);

/* initialize a dense vector */
void InitTensor1D(XTensor * tensor, const int num, 
                  const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);

/* initialize a dense matrix */
void InitTensor2D(XTensor * tensor, const int rowNum, const int colNum,
                  const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);

/* initialize a dense 3d tensor */
void InitTensor3D(XTensor * tensor, const int d0, const int d1, const int d2,
                  const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);
    
/* initialize a dense 4d tensor */
void InitTensor4D(XTensor * tensor, const int d0, const int d1, const int d2, const int d3,
                  const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);

/* initialize a dense 5d tensor */
void InitTensor5D(XTensor * tensor, const int d0, const int d1, const int d2, const int d3, const int d4,
                  const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, XMem * myMem = NULL);

/* initialize a tensor with a reference tensor */
414
void InitTensor(XTensor * tensor, const XTensor * reference);
xiaotong committed
415 416 417 418 419 420

/* generate a XTensor */
XTensor * NewTensor(const int myOrder, const int * myDimSize, const TENSOR_DATA_TYPE myDataType = X_FLOAT,
                    const float myDenseRatio = 1.0F, const int myDevID = -1, XMem * myMem = NULL);

/* generate a XTensor which allocates data on the buffer */
421 422 423 424 425 426
XTensor * NewTensorBuf(const int myOrder, const int * myDimSize,
                       const TENSOR_DATA_TYPE myDataType = X_FLOAT, const float myDenseRatio = 1.0F,
                       const int myDevID = -1, XMem * myMem = NULL);

/* generate a XTensor which allocates data on the buffer */
XTensor * NewTensorBuf(const XTensor * reference, int devID, XMem * myMem);
xiaotong committed
427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455

/* generate a dense vector */
XTensor * NewTensor1D(const int num, const TENSOR_DATA_TYPE myDataType = X_FLOAT, const int myDevID = -1, 
                      XMem * myMem = NULL);

/* generate a dense matrix */
XTensor * NewTensor2D(const int rowNum, const int colNum, 
                      const TENSOR_DATA_TYPE myDataType = X_FLOAT, 
                      const int myDevID = -1, XMem * myMem = NULL);

/* generate a dense 3d tensor */
XTensor * NewTensor3D(const int d0, const int d1, const int d2, 
                      const TENSOR_DATA_TYPE myDataType = X_FLOAT, 
                      const int myDevID = -1, XMem * myMem = NULL);

/* generate a dense 4d tensor */
XTensor * NewTensor4D(const int d0, const int d1, const int d2, const int d3,
                      const TENSOR_DATA_TYPE myDataType = X_FLOAT, 
                      const int myDevID = -1, XMem * myMem = NULL);

/* generate a dense 5d tensor */
XTensor * NewTensor5D(const int d0, const int d1, const int d2, const int d3, const int d4,
                      const TENSOR_DATA_TYPE myDataType = X_FLOAT, 
                      const int myDevID = -1, XMem * myMem = NULL);

/* generate a copy of XTensor (with a reference to a given tensor) */
XTensor * NewTensor(XTensor * a, bool isFilledData = true);

/* free the data space of a given tensor */
456
void DelTensor(XTensor * tensor);
xiaotong committed
457 458

/* free the data space of a given tensor (on the buffer) */
459
void DelTensorBuf(XTensor * tensor);
xiaotong committed
460 461 462 463

} /* end of the nts (NiuTrans.Tensor) namespace */

#endif