This setup builds on the fully distributed Hadoop configuration.

I. Configure time synchronization

(on all nodes)

# Using master as an example
# Install chrony
[root@master ~]# yum -y install chrony

# Edit the configuration file (replace the default server/pool entries with a single source)
[root@master ~]# vi /etc/chrony.conf
server time1.aliyun.com iburst

# Restart chronyd and enable it at boot
[root@master ~]# systemctl restart chronyd
[root@master ~]# systemctl enable chronyd
Created symlink from /etc/systemd/system/multi-user.target.wants/chronyd.service to /usr/lib/systemd/system/chronyd.service.

# Check the status
[root@master ~]# systemctl status chronyd
● chronyd.service - NTP client/server
   Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; vendor preset: enabled)
   Active: active (running) since Fri 2022-04-22 15:00:38 CST; 3min 31s ago
  Process: 795 ExecStartPost=/usr/libexec/chrony-helper update-daemon (code=exited, status=0/SUCCESS)
  Process: 762 ExecStart=/usr/sbin/chronyd $OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 781 (chronyd)
   CGroup: /system.slice/chronyd.service
           └─781 /usr/sbin/chronyd

Apr 22 15:00:37 master.example.com systemd[1]: Starting NTP client/server...
Apr 22 15:00:37 master.example.com chronyd[781]: chronyd version 2.1.1 starting (+CMDMON +NT...H)
Apr 22 15:00:38 master.example.com chronyd[781]: Frequency 0.000 +/- 1000000.000 ppm read fr...ft
Apr 22 15:00:38 master.example.com systemd[1]: Started NTP client/server.
Hint: Some lines were ellipsized, use -l to show in full.

# 'active (running)' in the output means chronyd started successfully
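
As an optional check (not part of the original steps), you can also confirm that chrony has actually selected the Aliyun server as its time source; in the output of chronyc sources, the line marked with ^* is the currently synchronized source:

[root@master ~]# chronyc sources -v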

II. Deploy ZooKeeper (master node)

1. Upload the installation package to /opt/software/ using Xftp

2. Extract the installation package

[root@master ~]# tar xf /opt/software/zookeeper-3.4.8.tar.gz -C /usr/local/src/
[root@master ~]# cd /usr/local/src/
[root@master src]# mv zookeeper-3.4.8 zookeeper

3. Create the data and logs directories

[root@master src]# cd /usr/local/src/zookeeper/
[root@master zookeeper]# mkdir data logs

4. Write this node's server ID (the myid file, which must match the node's server.N entry in zoo.cfg below)

[root@master zookeeper]# echo '1' > /usr/local/src/zookeeper/data/myid

5. Edit the configuration file zoo.cfg

[root@master zookeeper]# cd /usr/local/src/zookeeper/conf/
[root@master conf]# cp zoo_sample.cfg zoo.cfg
[root@master conf]# vi zoo.cfg
dataDir=/usr/local/src/zookeeper/data
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888

# In server.N=host:2888:3888, N is the node's myid, 2888 is the port followers use to connect to the leader, and 3888 is the leader-election port

6. Configure the environment variables in zookeeper.sh

[root@master conf]# vi /etc/profile.d/zookeeper.sh
export ZOOKEEPER_HOME=/usr/local/src/zookeeper
export PATH=${ZOOKEEPER_HOME}/bin:$PATH
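
The new PATH takes effect at the next login; to load it into the current shell right away (an optional convenience step, not in the original):

[root@master conf]# source /etc/profile.d/zookeeper.sh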

7. Change directory ownership to the hadoop user (the account the cluster services run as)

[root@master conf]# chown -R hadoop.hadoop /usr/local/src/

8. Copy the files to the slave nodes

[root@master conf]# scp -r /usr/local/src/zookeeper slave1:/usr/local/src/
[root@master conf]# scp -r /usr/local/src/zookeeper slave2:/usr/local/src/
[root@master conf]# scp /etc/profile.d/zookeeper.sh slave1:/etc/profile.d/
zookeeper.sh                                                   100%   87     0.1KB/s   00:00    
[root@master conf]# scp /etc/profile.d/zookeeper.sh slave2:/etc/profile.d/
zookeeper.sh                                                   100%   87     0.1KB/s   00:00    
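
Equivalently, the four copies can be driven by a small loop, which scales better if more slaves are added later (a sketch assuming the same host names):

[root@master conf]# for h in slave1 slave2; do
>   scp -r /usr/local/src/zookeeper ${h}:/usr/local/src/
>   scp /etc/profile.d/zookeeper.sh ${h}:/etc/profile.d/
> done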

9. Change directory ownership on the slave nodes

# On slave1
[root@slave1 ~]# chown -R hadoop.hadoop /usr/local/src/

# On slave2
[root@slave2 ~]# chown -R hadoop.hadoop /usr/local/src/

10. Write each slave's server ID

# On slave1
[root@slave1 ~]# echo '2' > /usr/local/src/zookeeper/data/myid

# On slave2
[root@slave2 ~]# echo '3' > /usr/local/src/zookeeper/data/myid
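
To double-check that every myid matches its server.N line in zoo.cfg, the three IDs can be read back from master (this relies on the passwordless SSH for the hadoop user that the Hadoop cluster already requires):

[hadoop@master ~]$ for h in master slave1 slave2; do
>   echo -n "$h: "; ssh $h cat /usr/local/src/zookeeper/data/myid
> done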

III. Start ZooKeeper

master node

[root@master conf]# su - hadoop
Last login: Fri Apr 22 16:26:07 CST 2022 on pts/0

[hadoop@master ~]$ jps
47472 Jps
45811 NameNode
46199 ResourceManager
46026 SecondaryNameNode

[hadoop@master ~]$ zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED

[hadoop@master ~]$ jps
45811 NameNode
47539 Jps
46199 ResourceManager
46026 SecondaryNameNode
47502 QuorumPeerMain
# Startup is successful only when a QuorumPeerMain process appears on every node

[hadoop@master ~]$ zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Mode: follower
# Startup succeeded only if the three nodes together show one leader and two followers

slave1 node

[root@slave1 ~]# su - hadoop

[hadoop@slave1 ~]$ jps
44720 Jps
7187 NodeManager
7050 DataNode

[hadoop@slave1 ~]$ zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED

[hadoop@slave1 ~]$ jps
7187 NodeManager
44741 QuorumPeerMain
7050 DataNode
44782 Jps

[hadoop@slave1 ~]$ zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Mode: leader

slave2 node

[root@slave2 ~]# su - hadoop

[hadoop@slave2 ~]$ jps
42178 Jps
41147 DataNode
41260 NodeManager

[hadoop@slave2 ~]$ zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED

[hadoop@slave2 ~]$ jps
42242 Jps
41147 DataNode
41260 NodeManager
42207 QuorumPeerMain

[hadoop@slave2 ~]$ zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Mode: follower
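
As a cross-check, each server's role can also be queried remotely through ZooKeeper's four-letter-word interface (supported in the 3.4.x line; this assumes nc is installed on master):

[hadoop@master ~]$ for h in master slave1 slave2; do
>   echo -n "$h: "; echo srvr | nc $h 2181 | grep Mode
> done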

IV. Deploy HBase

Refer to the next section, [Hadoop] HBase component configuration.

The only change is in the hbase-env.sh configuration file, where HBASE_MANAGES_ZK is switched from true to false so that HBase uses the external ZooKeeper ensemble deployed above instead of managing its own; every other configuration step is the same.

vi hbase-env.sh
export JAVA_HOME=/usr/local/src/jdk
export HBASE_MANAGES_ZK=false
export HBASE_CLASSPATH=/usr/local/src/hadoop/etc/hadoop/
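
hbase-env.sh must be identical on every node; if the edit was made on master only, push it to the slaves as well (a sketch assuming HBase is installed under /usr/local/src/hbase, as in the referenced section):

[root@master ~]# scp /usr/local/src/hbase/conf/hbase-env.sh slave1:/usr/local/src/hbase/conf/
[root@master ~]# scp /usr/local/src/hbase/conf/hbase-env.sh slave2:/usr/local/src/hbase/conf/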

V. Start Hadoop

# Start the distributed Hadoop cluster from master
[hadoop@master ~]$ start-all.sh
[hadoop@master ~]$ jps
3210 Jps
2571 NameNode
2780 SecondaryNameNode
2943 ResourceManager

# Check slave1
[hadoop@slave1 ~]$ jps
2512 DataNode
2756 Jps
2623 NodeManager

# Check slave2
[hadoop@slave2 ~]$ jps
3379 Jps
3239 NodeManager
3135 DataNode

# Make sure master has the NameNode, SecondaryNameNode, and ResourceManager processes, and that each slave node has DataNode and NodeManager
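
Because HBASE_MANAGES_ZK=false, the external ZooKeeper ensemble must be running before HBase is started. If the nodes were rebooted after step III, start it again on every node first:

# Run on master, slave1 and slave2 as the hadoop user
[hadoop@master ~]$ zkServer.sh start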

VI. Start HBase

[hadoop@master ~]$ start-hbase.sh
[hadoop@master ~]$ jps
3569 HMaster
2571 NameNode
2780 SecondaryNameNode
3692 Jps
2943 ResourceManager
3471 QuorumPeerMain

# Check slave1
[hadoop@slave1 ~]$ jps
2512 DataNode
2818 QuorumPeerMain
2933 HRegionServer
3094 Jps
2623 NodeManager

# Check slave2
[hadoop@slave2 ~]$ jps
3239 NodeManager
3705 Jps
3546 HRegionServer
3437 QuorumPeerMain
3135 DataNode

# Make sure master has the HMaster process and each slave has HRegionServer, alongside the QuorumPeerMain process of the external ZooKeeper ensemble on every node (if jps shows HQuorumPeer instead, HBASE_MANAGES_ZK is still true and HBase launched its own ZooKeeper)

VII. View the web UIs
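
For this stack (Hadoop 2.x with an HBase release of the same era, judging by the packages above), the web UIs are typically served at http://master:50070 (HDFS NameNode), http://master:8088 (YARN ResourceManager), and http://master:16010 (HBase Master; 60010 on older releases); the exact ports depend on the installed versions.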
