Commit b6d0c251 by liyinqiao

Update the manual and CMake.

1. Support generating the dynamic link library on GPU.
2. Support compiling the project with MKL and OpenBLAS.
3. Update the manual to match the new CMake file.
4. Fix minor errors.
parent 3fb4bd2e
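
The new GEN_DLL option switches the build between a static library and a dynamic link library, while USE_CUDA, USE_MKL, and USE_OPENBLAS select the compute backend. The hunks below only show the option declarations and the path setup, so here is a minimal sketch of how a GEN_DLL-style option is typically consumed; the target name NiuTensor and the SOURCE_FILES variable are assumptions for illustration, not taken from this commit.

# Hypothetical sketch -- not part of this diff.
# A GEN_DLL-style switch is usually consumed where the library target is declared:
if(GEN_DLL)
    add_library(NiuTensor SHARED ${SOURCE_FILES})   # produces .dll / .so
else()
    add_library(NiuTensor STATIC ${SOURCE_FILES})   # produces .lib / .a
endif()

With such a switch, a GPU shared-library build would be configured with something like "cmake -DUSE_CUDA=ON -DGEN_DLL=ON ..", using the options declared below.
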
# if your Visual Studio version is older than 2019
# use the command "cmake -A x64 .." to build this project
# or use the CMake GUI to generate the VS project (remember to select x64)
# if there are some warnings, don't worry about them
# cmake minimum version
cmake_minimum_required(VERSION 2.8)
@@ -29,18 +24,18 @@ option(USE_MKL "Use MKL" OFF)
option(USE_OPENBLAS "Use OpenBLAS" OFF)
option(GEN_DLL "Generate Dynamic Link Library" OFF)
-# If set USE_CUDA ON, please modify CUDA_TOOLKIT_ROOT_DIR below.
+# If set USE_CUDA ON, please modify CUDA_ROOT below.
# If set USE_MKL ON, please modify the INTEL_ROOT below.
# If set USE_OPENBLAS ON, please modify the OPENBLAS_ROOT below.
if (USE_CUDA)
-if(NOT EXISTS ${CUDA_TOOLKIT_ROOT_DIR})
+if(NOT EXISTS ${CUDA_ROOT})
if(WIN32)
-set(CUDA_TOOLKIT_ROOT_DIR "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v9.2")
+set(CUDA_ROOT "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v9.2")
else()
-set(CUDA_TOOLKIT_ROOT_DIR "/usr/local/cuda-9.2")
+set(CUDA_ROOT "/usr/local/cuda-9.2")
endif()
endif()
-message(STATUS "CUDA_TOOLKIT_ROOT_DIR: ${CUDA_TOOLKIT_ROOT_DIR}")
+message(STATUS "CUDA_ROOT: ${CUDA_ROOT}")
endif()
if(USE_MKL)
if(NOT DEFINED INTEL_ROOT)
@@ -125,9 +120,9 @@ if(USE_CUDA)
)
endif()
set(CMAKE_POLICY_DEFAULT_CMP0028 NEW)
-link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib/x64")
-include_directories("${CUDA_TOOLKIT_ROOT_DIR}/include")
-set(CUDA_LIB_DIR "${CUDA_TOOLKIT_ROOT_DIR}/lib/x64/")
+link_directories("${CUDA_ROOT}/lib/x64")
+include_directories("${CUDA_ROOT}/include")
+set(CUDA_LIB_DIR "${CUDA_ROOT}/lib/x64/")
set(CUDA_LIB_PATH ${CUDA_LIB_PATH} "${CUDA_LIB_DIR}cublas.lib")
set(CUDA_LIB_PATH ${CUDA_LIB_PATH} "${CUDA_LIB_DIR}npps.lib")
set(CUDA_LIB_PATH ${CUDA_LIB_PATH} "${CUDA_LIB_DIR}nppc.lib")
@@ -157,9 +152,9 @@ if(USE_CUDA)
-gencode=arch=compute_70,code=compute_70
)
endif()
-link_directories(${CUDA_TOOLKIT_ROOT_DIR}/lib64)
-include_directories(${CUDA_TOOLKIT_ROOT_DIR}/include)
-set(CUDA_LIB_DIR "${CUDA_TOOLKIT_ROOT_DIR}/lib64/")
+link_directories(${CUDA_ROOT}/lib64)
+include_directories(${CUDA_ROOT}/include)
+set(CUDA_LIB_DIR "${CUDA_ROOT}/lib64/")
set(CUDA_LIB_PATH ${CUDA_LIB_PATH} "${CUDA_LIB_DIR}libcublas_static.a")
set(CUDA_LIB_PATH ${CUDA_LIB_PATH} "${CUDA_LIB_DIR}libculibos.a")
set(CUDA_LIB_PATH ${CUDA_LIB_PATH} "${CUDA_LIB_DIR}libnpps_static.a")
......
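
Both branches above only accumulate the CUDA import libraries (Windows .lib files) or static archives (Linux .a files) into CUDA_LIB_PATH; the list still has to be handed to the linker when the library target is declared. A minimal sketch of that step, assuming a target named NiuTensor (the target declaration is outside the hunks shown here):

# Hypothetical sketch -- the target name is assumed, not taken from this diff.
add_library(NiuTensor ${SOURCE_FILES})
target_link_libraries(NiuTensor ${CUDA_LIB_PATH})

Collecting the static archives on Linux (libcublas_static.a, libculibos.a, and so on) avoids a runtime dependency on the cuBLAS/NPP shared libraries, at the cost of a larger binary.
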
@@ -52,7 +52,7 @@ int main( int argc, const char ** argv )
else if(argc > 1 && !strcmp(argv[1], "-t2t"))
TransformerMain(argc - 1, argv + 1);
else{
-fprintf(stderr, "Thanks for using NiuTrans.Network! This is a library for building\n");
+fprintf(stderr, "Thanks for using NiuTensor! This is a library for building\n");
fprintf(stderr, "neural networks in an easy way. \n\n");
fprintf(stderr, "Run this program with \"-test\" for unit test!\n");
fprintf(stderr, "Or run this program with \"-fnnlm\" for sample FNNLM!\n");
......
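
This main() dispatches on the first command-line argument (-test for the unit tests, -fnnlm for the FNNLM sample, -t2t for the Transformer entry point). For completeness, a hedged sketch of how the executable wrapping it could be declared in the CMake setup above; the target name NiuTensor.Run and the file name Main.cpp are assumptions, not taken from this commit.

# Hypothetical sketch -- target and file names are assumed.
add_executable(NiuTensor.Run Main.cpp)
target_link_libraries(NiuTensor.Run ${CUDA_LIB_PATH})
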