[CMake] CMake for CUDA with MPICC still using gcc to link?

Robert Maynard robert.maynard at kitware.com
Tue Jul 31 15:40:52 EDT 2018


The CUDA_HOST_COMPILER must be specified as a -D option on the first
invocation of cmake as it is used during the compiler detection
process.

The <LANG>_LINKER_PREFERENCE is a numeric value that determines which
language will be selected for linking when a library/executable has
source files from multiple languages.
On Tue, Jul 31, 2018 at 3:31 PM Quang Ha <quang.t.ha.20 at gmail.com> wrote:
>
> Thanks Robert - specifying it at the cmake -D [...] command seems to work. Is it possible to set this inside a CMakeLists.txt? I have tried:
>
> set(CUDA_HOST_COMPILER ${CMAKE_CXX_COMPILER})
> set(CUDA_LINKER_PREFERENCE ${CMAKE_CXX_COMPILER})
>
> but it snaps back to gcc still.
>
> Thanks,
> QT
>
> On Tue, 31 Jul 2018 at 13:12, Robert Maynard <robert.maynard at kitware.com> wrote:
>>
>> It is snapping back to g++ as that was the linker that nvcc uses when
>> it builds an executable. CMake detects this as part of the compiler
>> detection process.
>>
>> I haven't verified that this will work but you can try specifying the
>> LINKER_LANGUAGE(https://cmake.org/cmake/help/v3.12/prop_tgt/LINKER_LANGUAGE.html)
>> of the executable to be 'CXX'. You might also explore specifying
>> 'CMAKE_CUDA_HOST_COMPILER' as part of your initial cmake configuration
>> options to be the same as your C++ compiler (you will need a clean
>> build directory for this to work).
>> On Tue, Jul 31, 2018 at 1:43 PM Quang Ha <quang.t.ha.20 at gmail.com> wrote:
>> >
>> > Hi all,
>> >
>> > Currently using cmake/3.11.1, I want to try and compile MPI/CUDA application with cmake. I kept running into problems of 'undefined MPI_Init', so I take a look at `make VERBOSE=1`. The output looks something like this
>> >
>> > ===============================================================================
>> > /projects/opt/centos7/cmake/3.11.1/bin/cmake -E cmake_link_script CMakeFiles/tangram.dir/link.txt --verbose=1
>> > /projects/opt/centos7/openmpi/2.1.2-gcc_6.4.0/bin/mpicc -fPIC -fopenmp -O3 -DNDEBUG  -shared -Wl,-soname,libtangram.so -o libtangram.so CMakeFiles/tangram.dir/tangram/intersect/r3d.c.o
>> > make[2]: Leaving directory `/home/qth20/develop/tangram/build'
>> > [ 40%] Built target tangram
>> > make -f app/vfgen-cuda/CMakeFiles/vfgen-cuda.dir/build.make app/vfgen-cuda/CMakeFiles/vfgen-cuda.dir/depend
>> > make[2]: Entering directory `/home/qth20/develop/tangram/build'
>> > cd /home/qth20/develop/tangram/build && /projects/opt/centos7/cmake/3.11.1/bin/cmake -E cmake_depends "Unix Makefiles" /home/qth20/develop/tangram /home/qth20/develop/tangram/app/vfgen-cuda /home/qth20/develop/tangram/build /home/qth20/develop/tangram/build/app/vfgen-cud$
>> >  /home/qth20/develop/tangram/build/app/vfgen-cuda/CMakeFiles/vfgen-cuda.dir/DependInfo.cmake --color=
>> > make[2]: Leaving directory `/home/qth20/develop/tangram/build'
>> > make -f app/vfgen-cuda/CMakeFiles/vfgen-cuda.dir/build.make app/vfgen-cuda/CMakeFiles/vfgen-cuda.dir/build
>> > make[2]: Entering directory `/home/qth20/develop/tangram/build'
>> > [ 60%] Building CUDA object app/vfgen-cuda/CMakeFiles/vfgen-cuda.dir/vfgen-cuda.cu.o
>> > cd /home/qth20/develop/tangram/build/app/vfgen-cuda && /projects/opt/centos7/cuda/9.0/bin/nvcc  -DCUDA_CALLABLE="\"__host__ __device__\"" -DENABLE_MPI -DHAVE_LAPACKE -DMPICH_SKIP_MPICXX -DOMPI_SKIP_MPICXX -DTHRUST -DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_CUDA -DTHRUST$
>> > HOST_SYSTEM=THRUST_HOST_SYSTEM_CPP -I/home/qth20/develop/tangram -I/home/qth20/develop/tangram/build -I/home/qth20/develop/tangram/cinch/logging -I/home/qth20/installed/jali-gcc_6.4.0-openmpi-2.1.2/include -I/home/qth20/installed/jali-tpls-gcc_6.4.0-openmpi-2.1.2/include
>> > -I/home/qth20/installed/jali-tpls-gcc_6.4.0-openmpi-2.1.2/trilinos-12-10-1/include -I/projects/opt/centos7/openmpi/2.1.2-gcc_6.4.0/include -I/home/qth20/installed/jali-tpls-gcc_6.4.0-openmpi-2.1.2/trilinos-12-10-1/lib/cmake/Zoltan/../../../include -I/home/qth20/installed$
>> > jali-tpls-gcc_6.4.0-openmpi-2.1.2/include/UnitTest++ -I/usr/include/lapacke -I/home/qth20/installed/xmof2d-gcc_6.4.0-openmpi_2.1.2/include -I/projects/opt/centos7/cuda/9.0/include  --expt-relaxed-constexpr --std=c++11 -Xcompiler -fPIC -lmpi  -O3 -DNDEBUG   -x cu -c /home$
>> > qth20/develop/tangram/app/vfgen-cuda/vfgen-cuda.cu -o CMakeFiles/vfgen-cuda.dir/vfgen-cuda.cu.o
>> > /home/qth20/develop/tangram/tangram/wrappers/mesh/AuxMeshTopology.h(1708): warning: variable "nnodes" was declared but never referenced
>> >
>> > /home/qth20/develop/tangram/tangram/wrappers/mesh/AuxMeshTopology.h(1993): warning: variable "ncells" was declared but never referenced
>> >
>> > /home/qth20/develop/tangram/tangram/wrappers/mesh/AuxMeshTopology.h(2070): warning: variable "ibndry" was declared but never referenced
>> >
>> > [ 80%] Linking CUDA device code CMakeFiles/vfgen-cuda.dir/cmake_device_link.o
>> > cd /home/qth20/develop/tangram/build/app/vfgen-cuda && /projects/opt/centos7/cmake/3.11.1/bin/cmake -E cmake_link_script CMakeFiles/vfgen-cuda.dir/dlink.txt --verbose=1
>> > /projects/opt/centos7/cuda/9.0/bin/nvcc  --expt-relaxed-constexpr --std=c++11 -Xcompiler -fPIC -lmpi  -O3 -DNDEBUG  -Xcompiler=-fPIC -Wno-deprecated-gpu-targets -shared -dlink CMakeFiles/vfgen-cuda.dir/vfgen-cuda.cu.o -o CMakeFiles/vfgen-cuda.dir/cmake_device_link.o
>> > [100%] Linking CUDA executable vfgen-cuda
>> > cd /home/qth20/develop/tangram/build/app/vfgen-cuda && /projects/opt/centos7/cmake/3.11.1/bin/cmake -E cmake_link_script CMakeFiles/vfgen-cuda.dir/link.txt --verbose=1
>> > /projects/opt/centos7/gcc/6.4.0/bin/g++   CMakeFiles/vfgen-cuda.dir/vfgen-cuda.cu.o CMakeFiles/vfgen-cuda.dir/cmake_device_link.o -o vfgen-cuda /projects/opt/centos7/cuda/9.0/lib64/libcudart_static.a -ldl /usr/lib64/librt.so  -L"/projects/opt/centos7/cuda/9.0/lib64/stubs"
>> >  -L"/projects/opt/centos7/cuda/9.0/lib64" -lcudadevrt -lcudart_static -lrt -lpthread -ldl
>> > ===============================================================================
>> >
>> > so apparently, mpicc is used in other parts of the project, but when linking the final executable from the object code it snaps back to g++(!). I have already specified CMAKE_C_COMPILER and CMAKE_CXX_COMPILER to be mpicc and mpic++, respectively. Inside the CMakeLists for this CUDA:
>> >
>> > ===============================================================================
>> > project(tangram LANGUAGES CXX CUDA)
>> > [...]
>> > if (CUDA_FOUND)
>> >    string(APPEND CMAKE_CUDA_FLAGS "--expt-relaxed-constexpr --std=c++11 ")
>> >    # Other CUDA flags
>> > endif(CUDA_FOUND)
>> >
>> > [... Inside CUDA app directory...]
>> > add_executable(vfgen-cuda vfgen-cuda.cu)
>> > target_link_libraries(vfgen-cuda ${EXTRA_LIBS} ${MPI_LIBRARIES})
>> > ===============================================================================
>> >
>> > Is something missed out?
>> >
>> > Thanks,
>> > Quang
>> > --
>> >
>> > Powered by www.kitware.com
>> >
>> > Please keep messages on-topic and check the CMake FAQ at: http://www.cmake.org/Wiki/CMake_FAQ
>> >
>> > Kitware offers various services to support the CMake community. For more information on each offering, please visit:
>> >
>> > CMake Support: http://cmake.org/cmake/help/support.html
>> > CMake Consulting: http://cmake.org/cmake/help/consulting.html
>> > CMake Training Courses: http://cmake.org/cmake/help/training.html
>> >
>> > Visit other Kitware open-source projects at http://www.kitware.com/opensource/opensource.html
>> >
>> > Follow this link to subscribe/unsubscribe:
>> > https://cmake.org/mailman/listinfo/cmake


More information about the CMake mailing list