hadoop2.7完全分布式安装

mxw8 4年前

1、安装JDK1.7+

2、SSH互信免登陆

3、/etc/profile

HADOOP_PREFIX=/opt/hadoop
JAVA_HOME=/opt/jdk1.7
PATH=$PATH:$JAVA_HOME/bin:$HADOOP_PREFIX/bin:$HADOOP_PREFIX/sbin

export HADOOP_PREFIX PATH JAVA_HOME

4、hadoop安装目录/etc/hadoop/hadoop-env.sh

export JAVA_HOME=/opt/jdk1.7
export HADOOP_COMMON_HOME=/opt/hadoop

5、编辑/etc/hosts

192.168.98.34 NameNode34
192.168.98.35 DataNode35
192.168.98.37 DataNode37
192.168.98.38 DataNode38


6.1 core-site.xml


<configuration>

<!-- Base directory for Hadoop temporary files; other paths default under it. -->
<property>
  <name>hadoop.tmp.dir</name> 
  <value>/opt/hadoop/tmp</value>
  <description>A base for other temporary directories.</description>
</property>
<!-- Default filesystem: NameNode RPC endpoint; host name comes from /etc/hosts (step 5). -->
<property>
  <name>fs.defaultFS</name>  
  <value>hdfs://NameNode34:9000</value>
</property>
<!-- Read/write buffer size in bytes (128 KiB). -->
<property>
  <name>io.file.buffer.size</name>  
  <value>131072</value>
</property>
</configuration>


6.2 hdfs-site.xml


<configuration>
<!-- Replication factor left at the default of 3 (disabled override). -->
<!--
<property>
  <name>dfs.replication</name>
  <value>3</value>
</property>
-->
<!-- Local storage for NameNode metadata (fsimage/edits). -->
<property>
  <name>dfs.namenode.name.dir</name>
  <value>file:/opt/hadoop/Name</value>
</property>
<!-- Local storage for DataNode blocks.
     Fixed: added the file: URI prefix for consistency with
     dfs.namenode.name.dir above (a bare path is interpreted, but mixing
     styles in the same file invites mistakes). -->
<property>
  <name>dfs.datanode.data.dir</name>
  <value>file:/opt/hadoop/Data</value>
</property>
<!-- HDFS block size in bytes (256 MiB). -->
<property>
  <name>dfs.blocksize</name>
  <value>268435456</value>
</property>
<!-- Number of NameNode RPC handler threads. -->
<property>
  <name>dfs.namenode.handler.count</name>
  <value>100</value>
</property>
</configuration>


6.3 yarn-site.xml


<configuration>

<!-- Site specific YARN configuration properties -->
<!-- ACLs disabled; admin ACL wildcard is the default and has no effect while disabled. -->
<property>
  <name>yarn.acl.enable</name>
  <value>false</value>
</property>
<property>
  <name>yarn.admin.acl</name>
  <value>*</value>
</property>
<!-- Container logs stay on each NodeManager (no HDFS aggregation). -->
<property>
  <name>yarn.log-aggregation-enable</name>
  <value>false</value>
</property>
<!-- Shuffle handler implementation backing the mapreduce_shuffle aux service below. -->
<property>
   <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
   <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <!-- ResourceManager endpoints, all on the NameNode34 host. -->
  <property>
   <name>yarn.resourcemanager.address</name>
   <value>NameNode34:8032</value>
  </property>
  <property>
   <name>yarn.resourcemanager.scheduler.address</name>
   <value>NameNode34:8030</value>
  </property>
  <!-- NOTE(review): the stock default resource-tracker port is 8031, not 8035.
       NodeManagers pick this value up from the same file, so a nonstandard
       port still works cluster-wide — confirm 8035 is intentional. -->
  <property>
   <name>yarn.resourcemanager.resource-tracker.address</name>
   <value>NameNode34:8035</value>
  </property>
  <property>
   <name>yarn.resourcemanager.admin.address</name>
   <value>NameNode34:8033</value>
  </property>
  <property>
   <name>yarn.resourcemanager.webapp.address</name>
   <value>NameNode34:8088</value>
  </property>
  <property>
   <name>yarn.resourcemanager.hostname</name>
   <value>NameNode34</value>
  </property>
<!-- Enables the MapReduce shuffle auxiliary service on every NodeManager. -->
<property>
   <name>yarn.nodemanager.aux-services</name>
   <value>mapreduce_shuffle</value>
</property>
</configuration>


6.4 mapred-site.xml


<configuration>
<!-- Run MapReduce jobs on YARN rather than the classic JobTracker framework. -->
<property>
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
</property>
<!-- JobHistory server RPC endpoint and web UI, both on the NameNode34 host. -->
 <property>
  <name>mapreduce.jobhistory.address</name>
  <value>NameNode34:10020</value>
 </property>
 <property>
  <name>mapreduce.jobhistory.webapp.address</name>
  <value>NameNode34:19888</value>
 </property>

<!-- Optional memory/shuffle tuning, currently disabled.
     Fixed: the first property was mistakenly named mapreduce.framework.name
     (duplicating the active setting above); 1536 is the map container size
     in MB, so the correct key is mapreduce.map.memory.mb — it pairs with
     the -Xmx1024M map JVM heap, mirroring the reduce 3072 / -Xmx2560M pair. -->
<!--
<property>
  <name>mapreduce.map.memory.mb</name>
  <value>1536</value>
</property>
<property>
  <name>mapreduce.map.java.opts</name>
  <value>-Xmx1024M</value>
</property>
<property>
  <name>mapreduce.reduce.memory.mb</name>
  <value>3072</value>
</property>
<property>
  <name>mapreduce.reduce.java.opts</name>
  <value>-Xmx2560M</value>
</property>
<property>
  <name>mapreduce.task.io.sort.mb</name>
  <value>512</value>
</property>
<property>
  <name>mapreduce.task.io.sort.factor</name>
  <value>100</value>
</property>
<property>
  <name>mapreduce.reduce.shuffle.parallelcopies</name>
  <value>50</value>
</property>
-->
</configuration>


7、执行 hdfs namenode -format

8、编辑 hadoop安装目录/etc/hadoop/slaves文件

localhost
DataNode35
DataNode37
DataNode38


9、执行 start-dfs.sh

10、执行 start-yarn.sh


http://NameNode34:8088/ 查看yarn


http://NameNode34:50070/ 查看hdfs



创建如下脚本程序

[root@db apps]# vi   scp_hadoop.sh 

脚本内容如下:

#!/bin/sh
# Copy the local Hadoop install tree to each remote host as user 'sch'.
# Fixed: the loop terminator was 'Done' (capital D), which is a syntax
# error in sh — the keyword must be lowercase 'done'.
# NOTE(review): the host list (red, mongdb, nginx) does not match the
# DataNode35/37/38 hosts defined earlier in this guide — confirm targets.
for host in red mongdb nginx; do
    echo "$host"
    scp -r /work/apps/hadoop "sch@${host}:/work/apps/"
done

保存退出后, 修改文件的可执行属性 (chmod a+x *.sh)