
Build Hadoop (test) Cluster

See also: Use Hadoop (test) Cluster

These are my notes from building a test Hadoop cluster on virtual machines in VMware. They consist of a blend of instructions posted by others, with my own commentary added. Please review those sites first so this page makes sense to you.

Other sites you want to read:


Building

# all nodes (I used pdsh to spawn commands across all nodes)
rpm -ivh /usr/local/src/jdk-7u21-linux-x64.rpm                    
rpm -ivh /usr/local/src/jre-7u21-linux-x64.rpm                                                                        
alternatives --install /usr/bin/java java /usr/java/latest/bin/java 1600 
alternatives --auto java
# some Hadoop scripts expect to find java under /usr/java/bin, so link it there
cd /usr/java
ln -s ./latest/bin
which java 
java -version
# all nodes
cd /etc/yum.repos.d/
wget http://archive.cloudera.com/redhat/6/x86_64/cdh/cloudera-cdh3.repo
yum update                                                             
yum install hadoop-0.20
setenforce 0
# edit this file and disable SELinux so it stays off after a reboot
vi /etc/selinux/config 
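# the same change non-interactively (a sketch; verify the file afterwards):
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config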
# edit this file, add the rules below, then restart iptables
vi /etc/sysconfig/iptables
# hadoop daemon ports (web UIs, IPC, data transfer), allowed from the listed source range
-A INPUT -m state --state NEW -m tcp -p tcp -m iprange --src-range 129.133.x.xxx-129.133.x.xxx --dport 50070 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp -m iprange --src-range 129.133.x.xxx-129.133.x.xxx --dport 50075 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp -m iprange --src-range 129.133.x.xxx-129.133.x.xxx --dport 50090 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp -m iprange --src-range 129.133.x.xxx-129.133.x.xxx --dport 50105 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp -m iprange --src-range 129.133.x.xxx-129.133.x.xxx --dport 50030 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp -m iprange --src-range 129.133.x.xxx-129.133.x.xxx --dport 50060 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp -m iprange --src-range 129.133.x.xxx-129.133.x.xxx --dport 8020 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp -m iprange --src-range 129.133.x.xxx-129.133.x.xxx --dport 50010 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp -m iprange --src-range 129.133.x.xxx-129.133.x.xxx --dport 50020 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp -m iprange --src-range 129.133.x.xxx-129.133.x.xxx --dport 50100 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp -m iprange --src-range 129.133.x.xxx-129.133.x.xxx --dport 8021 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp -m iprange --src-range 129.133.x.xxx-129.133.x.xxx --dport 9001 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp -m iprange --src-range 129.133.x.xxx-129.133.x.xxx --dport 8012 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp -m iprange --src-range 129.133.x.xxx-129.133.x.xxx --dport 54310 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp -m iprange --src-range 129.133.x.xxx-129.133.x.xxx --dport 54311 -j ACCEPT
# also allow loopback traffic (127.0.0.1), and possibly port 9000
# hadoop web status pages, open to the wider 129.133.0.0/16 network
-A INPUT -m state --state NEW -m tcp -p tcp -s 129.133.0.0/16 --dport 50030 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp -s 129.133.0.0/16 --dport 50070 -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp -s 129.133.0.0/16 --dport 50075 -j ACCEPT
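# reload the firewall so the new rules take effect (stock iptables init script)
/etc/init.d/iptables restart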
# head node
yum -y install hadoop-0.20-namenode
yum -y install hadoop-0.20-jobtracker
# data node
yum -y install hadoop-0.20-datanode                                    
yum -y install hadoop-0.20-tasktracker   
# all nodes                                                            
cp -r /etc/hadoop-0.20/conf.empty   /etc/hadoop-0.20/conf.MyCluster
alternatives --install /etc/hadoop-0.20/conf hadoop-0.20-conf /etc/hadoop-0.20/conf.MyCluster 50
alternatives --set hadoop-0.20-conf /etc/hadoop-0.20/conf.MyCluster
alternatives --display hadoop-0.20-conf
vi /etc/hadoop-0.20/conf.MyCluster/core-site.xml
vi /etc/hadoop-0.20/conf.MyCluster/hdfs-site.xml
vi /etc/hadoop-0.20/conf.MyCluster/mapred-site.xml
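# Example minimal contents for those three files -- a sketch, not a drop-in
# config: "headnode" is a placeholder for the head node's hostname, the paths
# match the /mnt/hdfs/1 directories created in the next step, and ports
# 8020/8021 match the iptables rules above.
cat > /etc/hadoop-0.20/conf.MyCluster/core-site.xml <<'EOF'
<?xml version="1.0"?>
<configuration>
  <property>
    <name>fs.default.name</name>
    <value>hdfs://headnode:8020</value>
  </property>
</configuration>
EOF
cat > /etc/hadoop-0.20/conf.MyCluster/hdfs-site.xml <<'EOF'
<?xml version="1.0"?>
<configuration>
  <property>
    <name>dfs.name.dir</name>
    <value>/mnt/hdfs/1/namenode</value>
  </property>
  <property>
    <name>dfs.data.dir</name>
    <value>/mnt/hdfs/1/datanode</value>
  </property>
</configuration>
EOF
cat > /etc/hadoop-0.20/conf.MyCluster/mapred-site.xml <<'EOF'
<?xml version="1.0"?>
<configuration>
  <property>
    <name>mapred.job.tracker</name>
    <value>headnode:8021</value>
  </property>
  <property>
    <name>mapred.local.dir</name>
    <value>/mnt/hdfs/1/mapred</value>
  </property>
  <property>
    <name>mapred.system.dir</name>
    <value>/mapred/system</value>
  </property>
</configuration>
EOF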
# all nodes
mkdir -p /mnt/hdfs/1
mkdir -p /mnt/hdfs/1/namenode
mkdir -p /mnt/hdfs/1/datanode
mkdir -p /mnt/hdfs/1/mapred
chown -R hdfs:hadoop /mnt/hdfs
chown -R mapred:hadoop /mnt/hdfs/1/mapred
# headnode only
sudo -u hdfs hadoop namenode -format
# all nodes
chgrp hdfs /usr/lib/hadoop-0.20/
chmod g+rw /usr/lib/hadoop-0.20/
# head node
/etc/init.d/hadoop-0.20-namenode start
/etc/init.d/hadoop-0.20-jobtracker start
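# quick sanity check from the head node (assumes curl is installed): the web UIs
# should answer on the same ports opened in the iptables rules above
curl -sf http://localhost:50070/ > /dev/null && echo "namenode web UI up"
curl -sf http://localhost:50030/ > /dev/null && echo "jobtracker web UI up"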

# worker nodes
/etc/init.d/hadoop-0.20-datanode start
/etc/init.d/hadoop-0.20-tasktracker start
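# optional check that the daemons came up (jps ships with the JDK installed above);
# a healthy worker lists DataNode and TaskTracker
jps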
# head node only
# create the mapred system dir in HDFS and hand it to the mapred user
sudo -u hdfs hadoop fs -mkdir /mapred/system
sudo -u hdfs hadoop fs -chown mapred:hadoop /mapred/system
# world-writable (sticky) /tmp in HDFS
sudo -u hdfs hadoop fs -mkdir /tmp
sudo -u hdfs hadoop fs -chmod -R 1777 /tmp
# check cluster health and free space
sudo -u hdfs hadoop dfsadmin -report
sudo -u hdfs hadoop fs -df
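# optional smoke test: the pi example from the jar shipped with the hadoop-0.20
# package (jar path is my assumption -- check with ls /usr/lib/hadoop-0.20/)
sudo -u hdfs hadoop jar /usr/lib/hadoop-0.20/hadoop-examples.jar pi 2 1000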

TODO

