Hadoop 2.0集群配置详细教程[虚拟机下配置成功](2)

配置环境变量: vim /etc/profile
添加
export HADOOP_DEV_HOME=/usr/local/hadoop      # Hadoop installation directory
export PATH=$PATH:$HADOOP_DEV_HOME/bin
export PATH=$PATH:$HADOOP_DEV_HOME/sbin
# Fixed typo: the original exported HADOOP_MAPARED_HOME, but Hadoop's launch
# scripts read HADOOP_MAPRED_HOME, so the MapReduce home was never picked up.
export HADOOP_MAPRED_HOME=${HADOOP_DEV_HOME}
export HADOOP_COMMON_HOME=${HADOOP_DEV_HOME}
export HADOOP_HDFS_HOME=${HADOOP_DEV_HOME}
export YARN_HOME=${HADOOP_DEV_HOME}
# All daemons (HDFS, YARN) load their *-site.xml from etc/hadoop under the install root.
export HADOOP_CONF_DIR=${HADOOP_DEV_HOME}/etc/hadoop
export HDFS_CONF_DIR=${HADOOP_DEV_HOME}/etc/hadoop
export YARN_CONF_DIR=${HADOOP_DEV_HOME}/etc/hadoop

7)配置文件
配置hadoop-env.sh

vim /usr/hadoop/hadoop-2.0.0-alpha/etc/hadoop/hadoop-env.sh
在末尾添加 export JAVA_HOME=/usr/java/jdk1.7

core-site.xml

在 configuration节点里面添加属性

<property>
<name>hadoop.tmp.dir</name>
<value>/hadoop/tmp</value>
<description>A base for other temporary directories.</description>
</property>
<property>
<name>fs.default.name</name>
<value>hdfs://192.168.216.131:9000</value>
</property>


slave配置

vim /home/hadoop/hadoop/etc/hadoop/slaves
添加 slave的 IP
192.168.216.131
192.168.216.132
192.168.216.133


配置hdfs-site.xml

vim /home/hadoop/hadoop/etc/hadoop/hdfs-site.xml
添加节点

<property>
<name>dfs.replication</name>
<value>3</value>
</property>

<property>
<name>dfs.namenode.name.dir</name>
<value>file:/hadoop/hdfs/name</value>
<final>true</final>
</property>

<property>
<name>dfs.federation.nameservice.id</name>
<value>ns1</value>
</property>

<property>
<name>dfs.namenode.backup.address.ns1</name>
<value>192.168.216.131:50100</value>
</property>

<property>
<name>dfs.namenode.backup.http-address.ns1</name>
<value>192.168.216.131:50105</value>
</property>

<property>
<name>dfs.federation.nameservices</name>
<value>ns1</value>
</property>

<property>
<name>dfs.namenode.rpc-address.ns1</name>
<value>192.168.216.131:9000</value>
</property>
<property>
<name>dfs.namenode.rpc-address.ns2</name>
<value>192.168.216.131:9001</value><!-- ns2 必须使用与 ns1 (9000) 不同的 RPC 端口，否则同一主机上端口冲突 -->
</property>

<property>
<name>dfs.namenode.http-address.ns1</name>
<value>192.168.216.131:23001</value>
</property>

<property>
<name>dfs.namenode.http-address.ns2</name>
<value>192.168.216.131:13001</value>
</property>

<property>
<name>dfs.datanode.data.dir</name>
<value>file:/hadoop/hdfs/data</value>
<final>true</final>
</property>

<property>
<name>dfs.namenode.secondary.http-address.ns1</name>
<value>192.168.216.131:23002</value>
</property>

<!-- 原文此处将 ns1/ns2 的 secondary http-address 各重复定义了两次（23002 与 23003 冲突）；
     每个 nameservice 只保留一个唯一端口：ns1 用上方的 23002，ns2 用 23003 -->
<property>
<name>dfs.namenode.secondary.http-address.ns2</name>
<value>192.168.216.131:23003</value>
</property>


配置yarn-site.xml

添加节点

<property>
<name>yarn.resourcemanager.address</name>
<value>192.168.216.131:18040</value>
</property>

<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>192.168.216.131:18030</value>
</property>

<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>192.168.216.131:18088</value>
</property>

<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>192.168.216.131:18025</value>
</property>

<property>
<name>yarn.resourcemanager.admin.address</name>
<value>192.168.216.131:18141</value>
</property>

<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce.shuffle</value>
</property>

<property>

<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>

<value>org.apache.hadoop.mapred.ShuffleHandler</value>

</property>

在 /etc/hadoop 使用以下内容创建一个文件mapred-site.xml

<property>

<name>mapreduce.framework.name</name>

<value>yarn</value>

</property>

<property>


<name>mapred.system.dir</name>

<value>file:/hadoop/mapred/system</value>

<final>true</final>

</property>

<property>

<name>mapred.local.dir</name>

<value>file:/hadoop/mapred/local</value>

<final>true</final>

</property>


配置httpfs-site.xml

内容版权声明:除非注明,否则皆为本站原创文章。

转载注明出处:http://www.heiqu.com/481d0ec52acd8ff14208cd3a1bf81a72.html