User Tools

Site Tools


cluster:213

Differences

This shows you the differences between two versions of the page.

Link to this comparison view

Both sides previous revision Previous revision
Next revision
Previous revision
Next revision Both sides next revision
cluster:213 [2022/03/16 13:27]
hmeij07
cluster:213 [2022/07/25 13:19]
hmeij07 [Amber20]
Line 42: Line 42:
 scp 10.10.102.253:/root/.ssh/authorized_keys /root/.ssh/ scp 10.10.102.253:/root/.ssh/authorized_keys /root/.ssh/
 /etc/ssh/sshd_config (PermitRootLogin) /etc/ssh/sshd_config (PermitRootLogin)
 +
 +# Put the warewulf cluster key in authorized_keys
 +# Put eth0 fingerprints in cottontail/greentail52 known hosts
 +# add to relevant known_hosts_servername file
  
 # configure private subnets and ping file server # configure private subnets and ping file server
Line 53: Line 57:
  
 # make internet connection for yum # make internet connection for yum
 +
 +# iptables
 +dnf install -y iptables-services
 +vi /etc/sysconfig/iptables
 +# add 'local allow' ports  --dport 0:65535
 +systemctl start iptables # and enable
 +iptables -L
 +systemctl stop firewalld
 +systemctl disable firewalld
 +
 +
 # eth3 for ctt2 or eth1 for n100-101 # eth3 for ctt2 or eth1 for n100-101
 dnf install bind-utils dnf install bind-utils
 dig google.com dig google.com
 +iptables -L # check!
  
-#rocky8+Rocky8
 # https://docs.fedoraproject.org/en-US/epel/#Quickstart # https://docs.fedoraproject.org/en-US/epel/#Quickstart
 dnf config-manager --set-enabled powertools dnf config-manager --set-enabled powertools
Line 67: Line 83:
 dnf install gnuplot dnf install gnuplot
 dnf install alpine # pico dnf install alpine # pico
- +yum groupinstall "Server" server for compute nodes "Server with GUI"
-iptables +
-dnf install -y iptables-services +
-vi /etc/sysconfig/iptables +
-# add 'local allow' ports  --dport 0:65535 +
-systemctl start iptables # and enable +
-iptables -L +
-systemctl stop firewalld +
-systemctl disable firewalld+
  
 # other configs # other configs
Line 107: Line 115:
 echo "relayhost = 192.168.102.251" >> /etc/postfix/main.cf echo "relayhost = 192.168.102.251" >> /etc/postfix/main.cf
  
 +# on head node /etc/chrony.conf
 +allow 192.168.0.0/16
 # compute nodes /etc/chrony.conf # compute nodes /etc/chrony.conf
 #pool 2.pool.ntp.org iburst #pool 2.pool.ntp.org iburst
 Server 192.168.102.250 Server 192.168.102.250
 Server 192.168.102.251 Server 192.168.102.251
 +# check
 +chronyc sources
 +
 +
 +# on head node install from epel repo
 +yum install slurm-openlava
 +# error on conflicting libs, too bad!
  
  
Line 125: Line 142:
 yum install cmake -y yum install cmake -y
 yum install libjpeg libjpeg-devel libjpeg-turbo-devel -y yum install libjpeg libjpeg-devel libjpeg-turbo-devel -y
-amber+ 
 +#easybuild 
 +yum install libibverbs libibverbs-devel 
 + 
 +# amber20 cmake readline error fix needs 
 +yum install ncurses-c++-libs-6.1-9.20180224.el8.x86_64.rpm \ 
 +            ncurses-devel-6.1-9.20180224.el8.x86_64.rpm \ 
 +            readline-devel-7.0-10.el8.x86_64.rpm 
 + 
 +# amber20
 yum -y install tcsh make \ yum -y install tcsh make \
                gcc gcc-gfortran gcc-c++ \                gcc gcc-gfortran gcc-c++ \
Line 132: Line 158:
                perl perl-ExtUtils-MakeMaker util-linux wget \                perl perl-ExtUtils-MakeMaker util-linux wget \
                bzip2 bzip2-devel zlib-devel tar                 bzip2 bzip2-devel zlib-devel tar 
-yum update -y 
-yum clean all 
  
 # CENTOS7 pick the kernel vendor used for now # CENTOS7 pick the kernel vendor used for now
Line 144: Line 168:
 # compute nodes old level 3 # compute nodes old level 3
 systemctl set-default multi-user.target systemctl set-default multi-user.target
-# remove internet, bring private back up 
-reboot 
  
 # compute nodes only # compute nodes only
Line 163: Line 185:
 # openjdk version "1.8.0_322" # openjdk version "1.8.0_322"
 rpm -qa | grep ^java  # check rpm -qa | grep ^java  # check
 +yum install java-1.8.0-openjdk java-1.8.0-openjdk-devel \
 +java-1.8.0-openjdk-headless javapackages-filesystem
 # python v 3.9 # python v 3.9
 yum install python39 python39-devel yum install python39 python39-devel
 +ln -s /usr/bin/python3.9 /usr/bin/python
 # fftw 3.3.5-11.el8 # fftw 3.3.5-11.el8
 yum install fftw fftw-devel yum install fftw fftw-devel
Line 175: Line 200:
 # dmtcp # dmtcp
 yum install dmtcp dmtcp-devel yum install dmtcp dmtcp-devel
 +
 +# check status of service munge
  
 yum clean all yum clean all
 +# eth3 onboot=no, private networks only
 +systemctl disable iptables
 reboot reboot
  
 +# now make it an ohpc compute node
 +  yum repolist
 +  yum  install ohpc-base-compute
 +  
 +  scp cottontail2:/etc/resolv.conf /etc/resolv.conf
 +  yum  install ohpc-slurm-client
 +  systemctl enable munge
 +  systemctl start munge
 +  scp cottontail2:/etc/munge/munge.key /etc/munge/munge.key
 +  echo 'SLURMD_OPTIONS="--conf-server 192.168.102.250"' > /etc/sysconfig/slurmd
 +  yum  install --allowerasing lmod-ohpc
 +  grep '/var' /etc/slurm/slurm.conf
 +  mkdir /var/log/slurm 
 +  chown slurm:munge /var/log/slurm 
 +  mkdir /var/spool/slurm 
 +  chown slurm:munge /var/spool/slurm 
 +  scp cottontail2:/etc/slurm/slurm.conf /etc/slurm/slurm.conf
 +  scp cottontail2:/etc/slurm/gres.conf /etc/slurm/gres.conf
 +  scp cottontail2:/etc/profile.d/lmod.sh /etc/profile.d/
 +  
 +# /var/[log|spool|run] need to be removed from
 +/usr/libexec/warewulf/wwmkchroot/gold-template
 +
 +#test
 +  /usr/sbin/slurmd -D 
 +  
 +# start via rc.local
 +chmod +x /etc/rc.d/rc.local
 +#timing issue with munge
 +sleep 15
 +/usr/sbin/slurmd
 +  
 +# slurmd ???
 + libhwloc.so.15 => /opt/ohpc/pub/libs/hwloc/lib/libhwloc.so.15 (0x00007fd6e5684000)
 +
 +# add to zenoss edit /etc/snmp/snmpd.conf, enable and start
 +rocommunity public
 +dontLogTCPWrappersConnects yes
  
 </code> </code>
-==== Configure Recipe ==== 
  
-Steps. "Ala n37" ... so the RTX nodes are similar to the K20 nodes and we can put the local software in place. See [[cluster:172|K20 Redo]] page and [[cluster:192|exx96]] Recipe for CentOS 7+==== Pics ====
  
-New recipe for n100-n101 sporting Rocky 8.5 on ''cottontail2''\\ 
-Put node on internet...first though 
  
-  * ** Vanilla Backups** using Warewulf and plain rsync (--exclude=[proc/,sys/,run/]+My data center robot thingie and node n100's gpus\\ 
 + 
 +\\ 
 + 
 +{{:cluster:dcrobot.jpg?400|}} 
 +\\ 
 +{{:cluster:n100.jpg?400|}}\\ 
 +\\ 
 + 
 +==== Amber20 ==== 
 + 
 +OpenHPC
  
 <code> <code>
  
-# login as root check some things out... + 988  tar xvfj ../AmberTools21.tar.bz2  
-free -g +  989  tar xvfj ../Amber20.tar.bz2  
-nvidia-smi # if gpus +  993  cd amber20_src/ 
-cat /proc/cpuinfo+  994  cd build/ 
 +  996  vi run_cmake
  
-check and set local time zone + Assume this is Linux:
-mv /etc/localtime /etc/localtime.backup +
-ln -s /usr/share/zoneinfo/America/New_York /etc/localtime+
  
-change passwords for root and vendor account +serial, do on head node, with miniconda true, compile, install 
-passwd +  cmake $AMBER_PREFIX/amber20_src \ 
-passwd microway +    -DCMAKE_INSTALL_PREFIX=/share/apps/CENTOS8/ohpc/software/amber/20 \ 
-# set hostname +    -DCOMPILER=GNU  \ 
-hostnamectl set-hostname cottontail2+    -DMPI=FALSE -DCUDA=FALSE -DINSTALL_TESTS=TRUE \ 
 +    -DDOWNLOAD_MINICONDA=TRUE -DMINICONDA_USE_PY3=TRUE \ 
 +    2>&1 | tee  cmake.log
  
-root: sync cottontail's master and known_hosts (tails+stores) +Env
-ssh-keygen -t rsa +
-scp 10.10.102.253:/root/.ssh/authorized_keys /root/.ssh/ +
-/etc/ssh/sshd_config (PermitRootLogin)+
  
-# configure private subnets and ping file server +[hmeij@n100 ~]$ module load cuda/11.6
-cd /etc/sysconfig/network-scripts/ +
-vi ifcfg-eth0 # 192.168.102.x +
-vi ifcfg-eth1 # 10.10.102.x +
-vi ifcfg-eth3 # 129.133.52.x +
-systemctl restart network +
-ping -c 3 192.168.102.42 +
-ping -c 3 10.10.102.42+
  
-# make internet connection for yum +[hmeij@n100 ~]$ echo $CUDA_HOME 
-# eth3 for ctt2 or eth1 for n100-101 +/usr/local/cuda
-dnf install bind-utils +
-dig google.com+
  
-#rocky8 +[hmeij@n100 ~]$ which nvcc mpicc gcc 
-# https://docs.fedoraproject.org/en-US/epel/#Quickstart +/usr/local/cuda/bin/nvcc 
-dnf config-manager --set-enabled powertools +/opt/ohpc/pub/mpi/openmpi4-gnu9/4.1.1/bin/mpicc 
-dnf install epel-release +/opt/ohpc/pub/compiler/gcc/9.4.0/bin/gcc
-dnf install netcdf netcdf-devel +
-dnf install yum-utils # yumdownloader +
-dnf install ddd  +
-dnf install grace +
-dnf install gnuplot +
-dnf install alpine # pico+
  
-iptables +[FIXED} cmake error on conda install, set to FALSE 
-dnf install -y iptables-services +# OS native python, install on n[100-101] 
-vi /etc/sysconfig/iptables +-- Python version 3.9 -- OK 
-# add 'local allow' ports  --dport 0:65535 +-- Found PythonLibs: /usr/lib64/libpython3.9.so (found version "3.9.6" 
-systemctl start iptables # and enable +-- Checking for Python package numpy -- not found 
-iptables -L +-- Checking for Python package scipy -- not found 
-systemctl stop firewalld +-- Checking for Python package matplotlib -- not found 
-systemctl disable firewalld+-- Checking for Python package setuptools -- found 
 +[END FIXED]
  
-other configs +mpi & cuda FALSE builds serial 
-vi /etc/selinux/config # disabled, do not mistype, kernel will not boot! +./run_cmake 
-mv /home /usr/local/ +make install 
-vi /etc/passwd (exx, dockeruser $HOME)+# lots and lots of warnings
  
-## edit passwd, shadow, group, hosts files ## +then 
-## make -orig backups and stage in /home/tmp/global +source /share/apps/CENTOS8/ohpc/software/amber/20/amber.sh
-## cottontail2 = greentail52 sections+
  
-mkdir /sanscratch /home/localscratch +on n100 now, parallel, set miniconda flags to FALSE 
-chmod ugo+rwx /sanscratch /home/localscratch +-MPI=TRUE 
-chmod o+t /sanscratch /home/localscratch  +./run_cmake 
-link localscratch in 1.4T /home to / +make install
-mkdir /home  +
-cd /home # local dir +
-ln -s /zfshomes/apps +
-ln -s /zfshomes/tmp +
-ln -s /zfshomes/csmith06 +
-ln -s /zfshomes /share+
  
-fstab file mounts +on n100 just change cuda flag 
-# cottontail2 = greentail52 +-CUDA=TRUE 
-n100-n101 n79+./run_cmake 
 +make install
  
-postfix +#tests 
-dnf install postfix +cd $AMBERHOME 
-dnf install mailx +make test.serial 
-systemctl enable postfix +export DO_PARALLEL="mpirun -np 6" 
-echo "relayhost 192.168.102.42" >> /etc/postfix/main.cf+make test.parallel 
 +export CUDA_VISIBLE_DEVICES=0
 +make test.cuda.serial 
 +make test.cuda.parallel
  
-# compute nodes /etc/chronyc.conf +</code>
-#pool 2.pool.ntp.org iburst +
-Server 192.168.102.250 +
-Server 192.168.102.251+
  
 +==== Amber22 ====
  
-# add packages and update +OpenHPC
-yum install epel-release -y +
-yum install flex bison -y  +
-yum install tcl tcl-devel dmtcp -y +
-yum install net-snmp net-snmp-libs net-tools net-snmp-utils -y +
-yum install freeglut-devel libXi-devel libXmu-devel -y +
-yum install blas blas-devel lapack lapack-devel boost boost-devel -y +
-yum install lm_sensors lm_sensors-libs -y +
-yum install zlib-devel bzip2-devel -y +
-yum install openmpi openmpi-devel perl-ExtUtils-MakeMaker -y +
-yum install cmake -y +
-yum install libjpeg libjpeg-devel libjpeg-turbo-devel -y +
-# amber +
-yum -y install tcsh make \ +
-               gcc gcc-gfortran gcc-c++ \ +
-               which flex bison patch bc \ +
-               libXt-devel libXext-devel \ +
-               perl perl-ExtUtils-MakeMaker util-linux wget \ +
-               bzip2 bzip2-devel zlib-devel tar  +
-yum update -y +
-yum clean all+
  
-# CENTOS7 pick the kernel vendor used for now +<code>
-grep ^menuentry /etc/grub2.cfg +
-grub2-set-default 1 +
-ls -d /sys/firmware/efi && echo "EFI" || echo "Legacy" +
-#grub2-mkconfig -o /boot/grub2/grub.cfg          # legacy +
-#grub2-mkconfig -o /boot/efi/EFI/centos/grub.cfg # efi+
  
-# compute nodes old level 3 + 988  tar xvfj ../AmberTools22.tar.bz2  
-systemctl set-default multi-user.target +  989  tar xvfj ../Amber22.tar.bz2  
-# remove internet, bring private back up +  993  cd amber22_src/
-reboot+  994  cd build/ 
 +  996  vi run_cmake
  
-compute nodes only +#  Assume this is Linux:
-# leave old cuda versions behind (9.2 | 10.2) +
-cd usr/local/ +
-# scp fron n79:/usr/local/ +
-amber16/  amber20/ fsl-5.0.10/ gromacs-2018/ lammps-22Aug18/+
  
-compute nodes only /usr/local/bin/ +serialdo on head nodewith miniconda true, compile, install 
-# copy scripts: gpu-freegpu-infogpu-process +  cmake $AMBER_PREFIX/amber22_src \ 
-# copy 10.10.102.89:/usr/local/bin/n37.openmpi.wrapper /usr/local/bin+    -DCMAKE_INSTALL_PREFIX=/share/apps/CENTOS8/ohpc/software/amber/22 \ 
-# done+    -DCOMPILER=GNU 
 +    -DMPI=FALSE -DCUDA=FALSE -DINSTALL_TESTS=TRUE \ 
 +    -DDOWNLOAD_MINICONDA=TRUE \ 
 +    2>&1 | tee  cmake.log
  
-# FINISH native vanilla installs 
-# R version 4.1.2 (2021-11-01) -- "Bird Hippie" 
-yum install R R-devel 
-# openjdk version "1.8.0_322" 
-rpm -qa | grep ^java  # check 
-# python v 3.9 
-yum install python39 python39-devel 
-# fftw 3.3.5-11.el8 
-yum install fftw fftw-devel 
-#gnu scientific libraries 
-yum install gsl gsl-devel 
-# ruby 2.5.9-109.module+el8.5.0 
-yum install ruby ruby-devel 
-# obabel chem file formats 
-yum install openbabel openbabel-devel 
-# dmtcp 
-yum install dmtcp dmtcp-devel 
  
-yum clean all +# Env
-reboot+
  
 +[hmeij@n100 ~]$ module load cuda/11.6
 +
 +[hmeij@n100 ~]$ echo $CUDA_HOME
 +/usr/local/cuda
 +
 +[hmeij@n100 ~]$ which nvcc mpicc gcc
 +/usr/local/cuda/bin/nvcc
 +/opt/ohpc/pub/mpi/openmpi4-gnu9/4.1.1/bin/mpicc
 +/opt/ohpc/pub/compiler/gcc/9.4.0/bin/gcc
 +
 +# [FIXED] cmake error on conda install, set to FALSE
 +# OS native python, install on n[100-101]
 +-- Python version 3.9 -- OK
 +-- Found PythonLibs: /usr/lib64/libpython3.9.so (found version "3.9.6")
 +-- Checking for Python package numpy -- not found
 +-- Checking for Python package scipy -- not found
 +-- Checking for Python package matplotlib -- not found
 +-- Checking for Python package setuptools -- found
 +[END FIXED]
 +
 +# mpi & cuda FALSE builds serial
 +./run_cmake
 +make install
 +# lots and lots of warnings
 +
 +# then
 +source /share/apps/CENTOS8/ohpc/software/amber/22/amber.sh
 +
 +# on n100 now, parallel, set miniconda flags to FALSE
 +-MPI=TRUE
 +./run_cmake
 +make install
 +
 +# on n100 just change cuda flag
 +-CUDA=TRUE
 +./run_cmake
 +make install
 +
 +#tests
 +cd $AMBERHOME
 +make test.serial
 +export DO_PARALLEL="mpirun -np 6"
 +make test.parallel
 +export CUDA_VISIBLE_DEVICES=0
 +make test.cuda.serial
 +make test.cuda.parallel
  
 </code> </code>
  
-==== Pics ====+**[[cluster:0|Back]]**
  
  
-\\ 
 **[[cluster:0|Back]]** **[[cluster:0|Back]]**
  
cluster/213.txt · Last modified: 2024/01/12 15:09 by hmeij07