/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*   http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*
* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-24
*/

#ifndef __MATRIXMULBATCHED_H__
#define __MATRIXMULBATCHED_H__

#include "../../XTensor.h"

namespace nts { // namespace nts(NiuTrans.Tensor)

29 30
#define BMMul MatrixMulBatched

xiaotong committed
31
/*
32
matrix multiplication of the two tensors c = trans(a) * trans(b) * alpha + c * beta
33 34 35 36 37 38 39

for each 2-dimensional data array in a (denoted as ai) and
each 2-dimensional data array in b (denoted as bi), we have
ci = trans(ai) * trans(bi) * alpha + cm * beta
where trans() returns the transposed matrix if the flag is fired
*/
void _MatrixMulBatched(const XTensor * a, MATRIX_TRANS_TYPE transposedA, const XTensor * b, MATRIX_TRANS_TYPE transposedB,
40
                       XTensor * c, DTYPE alpha = (DTYPE)1.0, DTYPE beta = 0, XPRunner * parallelRunner = NULL);
41

42 43 44 45 46 47

/*
matrix multiplication of the two tensors c = trans(a) * trans(b) * alpha + c * beta
optimized for GPU
*/
void _MatrixMulBatchedGPU(const XTensor * a, MATRIX_TRANS_TYPE transposedA, const XTensor * b, MATRIX_TRANS_TYPE transposedB,
48 49 50 51 52 53 54 55 56 57 58 59 60 61 62
                          XTensor * c, DTYPE alpha = (DTYPE)1.0, DTYPE beta = 0);

/*
matrix multiplication of the two tensors c = trans(a) * trans(b) * alpha + c * beta
optimized for CPU
*/
void _MatrixMulBatchedCPU(const XTensor * a, MATRIX_TRANS_TYPE transposedA, const XTensor * b, MATRIX_TRANS_TYPE transposedB, 
                          XTensor * c, DTYPE alpha = (DTYPE)1.0, DTYPE beta = 0);

/*
matrix multiplication of the two tensors c = trans(a) * trans(b) * alpha + c * beta (for list inputs)
optimized for CPU
*/
void _MatrixMulBatchedCPU(const XList * a, MATRIX_TRANS_TYPE transposedA, const XList * b, MATRIX_TRANS_TYPE transposedB, 
                          XList * c, DTYPE alpha = (DTYPE)1.0, DTYPE beta = 0);
63

64
/*
65
matrix multiplication of the two tensors (return a XTensor structure) c = trans(a) * trans(b) * alpha
66 67
make a new tensor to keep the result and return it

xiaotong committed
68 69 70 71 72
for each 2-dimensional data array in a (denoted as ai) and
each 2-dimensional data array in b (denoted as bi), we have
ci = trans(ai) * trans(bi) * alpha + cm * beta
where trans() returns the transposed matrix if the flag is fired
*/
73
XTensor MatrixMulBatched(const XTensor &a, MATRIX_TRANS_TYPE transposedA, const XTensor &b, MATRIX_TRANS_TYPE transposedB,
74
                         DTYPE alpha = (DTYPE)1.0, XPRunner * parallelRunner = NULL);
xiaotong committed
75

76 77 78 79 80 81 82 83 84 85 86
/*
matrix multiplication of the two tensors (return a XTensor structure) c = a * b * alpha
make a new tensor to keep the result and return it

for each 2-dimensional data array in a (denoted as ai) and
each 2-dimensional data array in b (denoted as bi), we have
ci = ai * bi * alpha + cm * beta
*/
XTensor MatrixMulBatched(const XTensor &a, const XTensor &b, 
                         DTYPE alpha = (DTYPE)1.0, XPRunner * parallelRunner = NULL);

} // namespace nts(NiuTrans.Tensor)

#endif // __MATRIXMULBATCHED_H__