Hadoop Fully Distributed Cluster Setup
-
Install the JDK environment
tar -zxvf /opt/software/jdk-8u301-linux-x64.tar.gz -C /opt/module
Edit the environment configuration file
vi /etc/profile.d/my_env.sh
my_env.sh
#JAVA_HOME
export JAVA_HOME=/opt/module/jdk1.8.0_301
export PATH=$PATH:$JAVA_HOME/bin
Distribute to all nodes (with the xsync script; a sketch of it follows)
xsync /etc/profile.d/my_env.sh
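The xsync script used above is not shown in this document; a minimal sketch built on rsync, assuming the hostnames master/slave1/slave2 and rsync installed on every node (until passwordless SSH is configured below, each copy will prompt for a password):

#!/bin/bash
# xsync: distribute files/directories to every cluster node (hypothetical helper).
if [ $# -lt 1 ]; then
    echo "Usage: xsync <file-or-dir>..."
    exit 1
fi
for host in master slave1 slave2; do
    echo "==================== $host ===================="
    for file in "$@"; do
        if [ -e "$file" ]; then
            pdir=$(cd -P "$(dirname "$file")"; pwd)   # resolve the absolute parent dir
            fname=$(basename "$file")
            ssh "$host" "mkdir -p $pdir"              # make sure the target dir exists
            rsync -av "$pdir/$fname" "$host:$pdir"    # copy file or directory in place
        else
            echo "$file does not exist!"
        fi
    done
done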
Finally, apply and test on every node:
source /etc/profile
java -version
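To check all three nodes at once, a quick loop (assuming the hostnames above; /etc/profile is sourced explicitly because non-interactive SSH shells do not load it):

for host in master slave1 slave2; do
    ssh "$host" "source /etc/profile && java -version"
done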
-
Install Hadoop
tar -zxvf /opt/software/hadoop-3.1.3.tar.gz -C /opt/module
Edit the environment configuration file (Hadoop)
vi /etc/profile.d/my_env.sh
my_env.sh
#HADOOP_HOME
export HADOOP_HOME=/opt/module/hadoop-3.1.3
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
Edit the cluster configuration files
cd /opt/module/hadoop-3.1.3/etc/hadoop/
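Each property snippet below goes inside the root <configuration> element of the corresponding file; Hadoop ignores properties placed outside it. The common skeleton:

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- property blocks from the sections below go here -->
</configuration>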
core-site.xml
<!-- Specify the NameNode address -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://master:8020</value>
</property>
<!-- Specify the Hadoop data storage directory -->
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/module/hadoop-3.1.3/data</value>
</property>
<!-- Set the static user for HDFS web UI login to hadoop
<property>
<name>hadoop.http.staticuser.user</name>
<value>hadoop</value>
</property>-->
hdfs-site.xml
<!-- NameNode (NN) web UI address -->
<property>
<name>dfs.namenode.http-address</name>
<value>master:9870</value>
</property>
<!-- SecondaryNameNode (2NN) web UI address -->
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>slave2:9868</value>
</property>
yarn-site.xml
<!-- Have MapReduce use the shuffle auxiliary service -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<!-- Specify the ResourceManager address -->
<property>
<name>yarn.resourcemanager.hostname</name>
<value>slave1</value>
</property>
<!-- Environment variable inheritance -->
<property>
<name>yarn.nodemanager.env-whitelist</name>
<value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
</property>
<!-- Enable log aggregation -->
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<!-- Log aggregation server address -->
<property>
<name>yarn.log.server.url</name>
<value>http://master:19888/jobhistory/logs</value>
</property>
<!-- Retain aggregated logs for 7 days -->
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<value>604800</value>
</property>
mapred-site.xml
<!-- Run MapReduce jobs on YARN -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<!-- JobHistory server address -->
<property>
<name>mapreduce.jobhistory.address</name>
<value>master:10020</value>
</property>
<!-- JobHistory server web UI address -->
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>master:19888</value>
</property>
workers (the file must not contain any spaces or blank lines)
master
slave1
slave2
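Stray whitespace in workers becomes part of a hostname and breaks startup. cat -A makes it visible: every line should show the hostname immediately followed by $ (end of line), with no trailing spaces or blank lines:

cat -A /opt/module/hadoop-3.1.3/etc/hadoop/workers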
Distribute to all nodes (with the xsync script)
xsync /opt/module/hadoop-3.1.3/etc/hadoop/
-
Passwordless SSH login
master (host)
master$ ssh master
master$ cd .ssh/
master$ ssh-keygen -t rsa   # press Enter three times to accept the defaults
master$ ssh-copy-id master
master$ ssh-copy-id slave1
master$ ssh-copy-id slave2
Repeat the same steps on slave1 and slave2
master (host, as the root user)
master$ su - root
master$ ssh-keygen -t rsa   # press Enter three times to accept the defaults; this creates /root/.ssh
master$ ssh-copy-id master
master$ ssh-copy-id slave1
master$ ssh-copy-id slave2
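To verify that passwordless login works from every node to every node, run a loop like this on each of the three hosts; each hostname should print without a password prompt:

for host in master slave1 slave2; do
    ssh "$host" hostname
done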
-
Initialize and start the services (format on master)
cd /opt/module/hadoop-3.1.3/
hdfs namenode -format
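Format the NameNode only once, on first setup. To reformat later, stop all daemons first and clear the data and logs directories on every node, otherwise DataNodes will refuse to start because their stored clusterID no longer matches the NameNode's. A sketch, reusing the node list from above:

for host in master slave1 slave2; do
    ssh "$host" "rm -rf /opt/module/hadoop-3.1.3/data /opt/module/hadoop-3.1.3/logs"
done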
master
cd /opt/module/hadoop-3.1.3/
sbin/start-dfs.sh
bin/mapred --daemon start historyserver
slave1
cd /opt/module/hadoop-3.1.3/
sbin/start-yarn.sh
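With everything up, jps on each node should show: DataNode and NodeManager on all three hosts (all are listed in workers), plus NameNode and JobHistoryServer on master, ResourceManager on slave1, and SecondaryNameNode on slave2. A quick check from any node:

for host in master slave1 slave2; do
    echo "==== $host ===="
    ssh "$host" "source /etc/profile && jps"
done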
If you spot any errors or omissions, feel free to point them out.