yum install kernel-devel kernel-headers (remove old headers after reboot)
yum install gcc gcc-gfortran gcc-c++  # CHROOT done
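
The nvidia kernel module is built against the running kernel, so it is worth confirming that the installed headers match it before going further (a quick check, not part of the original notes; a mismatch breaks the driver build):

# running kernel and devel/headers packages should agree on version
uname -r
rpm -q kernel-devel kernel-headers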

# /etc/modprobe.d/blacklist-nouveau.conf (new file by nvidia)
# reboot before driver installation # CHROOT done
blacklist nouveau
options nouveau modeset=0

# new kernel initramfs, load
dracut --force

reboot
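
After the reboot, a quick way to confirm nouveau really stayed out of the kernel (simple sanity check, not in the original notes):

# should print nothing if the blacklist and new initramfs took effect
lsmod | grep nouveau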
  
# download runfiles from https://developer.nvidia.com/cuda-downloads
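
A quick integrity check on the downloaded runfile before running it (using the cuda_name_of_runfile placeholder from these notes; compare the sum against the checksum published on the downloads page):

chmod +x cuda_name_of_runfile
md5sum  cuda_name_of_runfile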
Install the CUDA 9.2 Samples?
(y)es/(n)o/(q)uit: n
  
# nvidia driver
./cuda_name_of_runfile -silent -driver

# Device files /dev/nvidia* exist with 0666 permissions?
# They were not
/usr/local/src/nvidia-modprobe.sh
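
The contents of that script are not shown in these notes; the sketch below assumes it follows NVIDIA's documented startup example of loading the module and creating the device nodes by hand:

#!/bin/bash
# sketch only -- load the nvidia module and create /dev/nvidia* (char major 195)
/sbin/modprobe nvidia
if [ $? -eq 0 ]; then
  # one device node per GPU (3D controller or VGA controller), plus nvidiactl
  N3D=$(lspci | grep -i nvidia | grep -ci "3d controller")
  NVGA=$(lspci | grep -i nvidia | grep -ci "vga compatible controller")
  N=$((N3D + NVGA - 1))
  for i in $(seq 0 $N); do
    mknod -m 666 /dev/nvidia$i c 195 $i
  done
  mknod -m 666 /dev/nvidiactl c 195 255
fi

Once the device files exist, nvidia-smi should list the GPUs.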
  
# backup
[root@n37 src]# rpm -qf /usr/lib/libGL.so
file /usr/lib/libGL.so is not owned by any package
cp /usr/lib/libGL.so   /usr/lib/libGL.so-nvidia
cp /usr/lib/libGL.so.1 /usr/lib/libGL.so.1-nvidia
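
A quick way to see which libGL copies exist and which one the dynamic linker resolves (simple check, not in the original notes):

ls -l /usr/lib/libGL.so*
ldconfig -p | grep libGL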
  
[root@n37 src]# ls /etc/X11/xorg.conf
[root@n37 src]#
[root@n37 src]# scp n78:/etc/X11/xorg.conf /etc/X11/  # CHROOT done
  
# for mapd, graphics support needs to be enabled
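
How graphics support was enabled is not spelled out here; one common approach is to let nvidia-xconfig generate an xorg.conf that exposes every GPU, including headless ones (a sketch, not necessarily what was done on this node):

# write /etc/X11/xorg.conf covering all GPUs
nvidia-xconfig --enable-all-gpus --allow-empty-initial-configuration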
  
  
To do another node, the steps are (a consolidated command sketch follows the list):
  
  * add node in deploy.txt of n36.chroot/  (centos 7.2)
  * ./deploy.txt `grep node_name deploy.txt`
  * scp in place passwd, shadow, group, hosts, fstab from global archive
  * hostnamectl set-hostname node_name (logout/login)
  * eth1 on 129.133
  * yum update
  * yum install kernel-headers kernel-devel
  * put n37 tarball in /, unpack, remove cuda-9.
  * reboot
  * Nvidia install: files in /usr/local/src
    * sh runfile
    * reboot (nouveau)
    * ./runfile -silent -driver
    * reboot
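
A consolidated sketch of the driver portion of those steps, run on the new node (file names are placeholders as above; the toolkit pass is interactive, the driver pass is silent):

cd /usr/local/src
yum -y update
yum -y install kernel-headers kernel-devel
# unpack the n37 tarball in / and remove the old cuda-9 tree, then reboot
sh ./cuda_name_of_runfile               # toolkit/samples prompts, answered as above
reboot                                  # nouveau blacklist + new initramfs take effect
./cuda_name_of_runfile -silent -driver  # unattended driver install
reboot
/usr/local/src/nvidia-modprobe.sh       # recreate /dev/nvidia* if needed
nvidia-smi                              # GPUs should now be visible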
  
  
cluster/172.txt · Last modified: 2020/07/15 17:52 by hmeij07