您好,登錄后才能下訂單哦!
操作系統(tǒng)版本:
centos7 64位
hadoop版本:
hadoop-2.7.3
hbase版本:
hbase-1.2.4
機(jī)器:
192.168.11.131 master1 Namenode ResourceManager QuorumPeerMain Jobhistory HMaster DFSZKFailoverController
192.168.11.132 master2 Namenode HMaster DFSZKFailoverController
192.168.11.133 slave1 Datanode HRegionServer NodeManager JournalNode
192.168.11.134 slave2 Datanode HRegionServer NodeManager JournalNode
192.168.11.135 slave3 Datanode HRegionServer NodeManager JournalNode
所有節(jié)點(diǎn)關(guān)閉防火墻及selinux
# firewall-cmd --state
running
# systemctl stop firewalld.service
# systemctl disable firewalld.service
# setenforce 0
# vi /etc/sysconfig/selinux
SELINUX=enforcing --> disabled
所有節(jié)點(diǎn)配置yum源
# cd
# mkdir apps
http://mirrors.163.com/centos/7/os/x86_64/Packages/wget-1.14-15.el7.x86_64.rpm
# rpm -i wget-1.14-15.el7.x86_64.rpm
# cd /etc/yum.repos.d
# wget http://mirrors.aliyun.com/repo/Centos-7.repo
# mv Centos-7.repo CentOS-Base.repo
# scp CentOS-Base.repo root@192.168.11.131:/etc/yum.repos.d/
# scp CentOS-Base.repo root@192.168.11.132:/etc/yum.repos.d/
# scp CentOS-Base.repo root@192.168.11.133:/etc/yum.repos.d/
# scp CentOS-Base.repo root@192.168.11.134:/etc/yum.repos.d/
# yum clean all
# yum makecache
# yum update
配置ntp時(shí)間同步
所有節(jié)點(diǎn)安裝ntp
# yum install -y ntp
ntp server端:
# date -s "2018-05-27 23:03:30"
# vi /etc/ntp.conf
在注釋下添加兩行
#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
server 127.127.1.0
fudge 127.127.1.0 stratum 11
注釋下面
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
# systemctl start ntpd.service
# systemctl enable ntpd.service
ntp客戶端(其余四臺(tái)都為ntp客戶端):
# vi /etc/ntp.conf
同樣注釋下添加兩行
#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
server 192.168.11.131
四行添加注釋
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
# systemctl start ntpd.service
# systemctl enable ntpd.service
# ntpdate 192.168.11.131
28 May 07:04:50 ntpdate[1714]: the NTP socket is in use, exiting
# lsof -i:123
-bash: lsof: command not found
# yum install -y lsof
# lsof -i:123
COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
ntpd 1693 ntp 16u IPv4 25565 0t0 UDP *:ntp
ntpd 1693 ntp 17u IPv6 25566 0t0 UDP *:ntp
ntpd 1693 ntp 18u IPv4 25572 0t0 UDP localhost:ntp
ntpd 1693 ntp 19u IPv4 25573 0t0 UDP localhost.localdomain:ntp
ntpd 1693 ntp 20u IPv6 25574 0t0 UDP localhost:ntp
ntpd 1693 ntp 21u IPv6 25575 0t0 UDP localhost.localdomain:ntp
# kill -9 1693
# ntpdate 192.168.11.131
27 May 23:06:14 ntpdate[1728]: step time server 192.168.11.131 offset -28808.035509 sec
# date
Sun May 27 23:06:17 CST 2018
所有節(jié)點(diǎn)修改主機(jī)名(永久)
# hostnamectl set-hostname master1~slave3
臨時(shí)修改主機(jī)名
# hostname master1~slave3
主節(jié)點(diǎn)修改hosts文件
# vi /etc/hosts
192.168.11.131 master1
192.168.11.132 master2
192.168.11.133 slave1
192.168.11.134 slave2
192.168.11.135 slave3
把hosts文件覆蓋到其他機(jī)器
# scp /etc/hosts root@192.168.11.132~135:/etc/
所有節(jié)點(diǎn)創(chuàng)建管理用戶和組
創(chuàng)建組和用戶
# groupadd hduser
# useradd -g hduser hduser
# passwd hduser
創(chuàng)建目錄并賦權(quán)
每臺(tái)機(jī)器上創(chuàng)建如下文件夾
# mkdir /data1
# mkdir /data2
修改權(quán)限
# chown hduser:hduser /data1
# chown hduser:hduser /data2
# su hduser
$ mkdir -p /data1/hadoop_data/hdfs/namenode
$ mkdir -p /data2/hadoop_data/hdfs/namenode
$ mkdir -p /data1/hadoop_data/hdfs/datanode(NameNode不要)
$ mkdir -p /data2/hadoop_data/hdfs/datanode(NameNode不要)
$ mkdir -p /data1/hadoop_data/pids
$ mkdir -p /data2/hadoop_data/pids
$ mkdir -p /data1/hadoop_data/hadoop_tmp
$ mkdir -p /data2/hadoop_data/hadoop_tmp
無(wú)密驗(yàn)證
master1和master2節(jié)點(diǎn)操作
# su - hduser
$ ssh-keygen -t rsa
$ cd ~/.ssh
$ cat id_rsa.pub >> authorized_keys
master1節(jié)點(diǎn)操作
$ ssh-copy-id -i ~/.ssh/id_rsa.pub hduser@master2
master2節(jié)點(diǎn)操作
$ scp ~/.ssh/authorized_keys hduser@master1:~/.ssh/
slave1、slave2和slave3節(jié)點(diǎn)創(chuàng)建.ssh目錄
# mkdir /home/hduser/.ssh
# chown hduser:hduser /home/hduser/.ssh
master1節(jié)點(diǎn)操作
$ scp ~/.ssh/authorized_keys hduser@slave1:~/.ssh
$ scp ~/.ssh/authorized_keys hduser@slave2:~/.ssh
$ scp ~/.ssh/authorized_keys hduser@slave3:~/.ssh
master1和master2節(jié)點(diǎn)驗(yàn)證
驗證方法,分別在兩個節點,ssh登陸本機(hduser用戶)及其他四個節點,看看是不是無密登陸。
如果未通過驗(yàn)證,所有機(jī)器執(zhí)行下面命令
$ chmod 600 ~/.ssh/authorized_keys
$ chmod 700 ~/.ssh
所有節(jié)點(diǎn)配置java環(huán)境
$ mkdir -p /data1/usr/src
上傳包到/data1/usr/src目錄下
$ cd /data1/usr/src
$ tar xf jdk1.7.0_79.tar -C /data1/usr/
$ vi ~/.bashrc
export JAVA_HOME=/data1/usr/jdk1.7.0_79
export JRE_HOME=$JAVA_HOME/jre
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib/rt.jar
export PATH=$PATH:$JAVA_HOME/bin
$ source ~/.bashrc
master1節點配置hadoop(hduser用戶)
下載hadoop-2.7.3.tar.gz,上傳到/data1/usr/src
http://mirrors.cnnic.cn/apache/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz
$ cd /data1/usr/src
$ tar -zxf hadoop-2.7.3.tar.gz -C /data1/usr/
$ vi /data1/usr/hadoop-2.7.3/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/data1/usr/jdk1.7.0_79
export HADOOP_PID_DIR=/data2/hadoop_data/pids
export HADOOP_MAPRED_PID_DIR=/data1/hadoop_data/pids
$ vi /data1/usr/hadoop-2.7.3/etc/hadoop/mapred-env.sh
export HADOOP_MAPRED_PID_DIR=/data2/hadoop_data/pids
$ vi /data1/usr/hadoop-2.7.3/etc/hadoop/yarn-env.sh
export YARN_PID_DIR=/data2/hadoop_data/pids
$ vi /data1/usr/hadoop-2.7.3/etc/hadoop/core-site.xml
<configuration>
<!-- 指定hdfs的nameservice為masters -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://masters</value>
</property>
<!-- 指定hadoop運(yùn)行時(shí)產(chǎn)生文件的存儲(chǔ)目錄 -->
<property>
<name>hadoop.tmp.dir</name>
<value>/data2/hadoop_data/hadoop_tmp</value>
</property>
<!-- 指定zookeeper地址 -->
<property>
<name>ha.zookeeper.quorum</name>
<value>master1:2181,master2:2181,slave1:2181,slave2:2181,slave3:2181</value>
</property>
</configuration>
$ vi /data1/usr/hadoop-2.7.3/etc/hadoop/hdfs-site.xml
<configuration>
<!--指定hdfs的nameservice為masters,需要和core-site.xml中的保持一致 -->
<property>
<name>dfs.nameservices</name>
<value>masters</value>
</property>
<!-- h2下面有兩個(gè)NameNode,分別是master1,master2 -->
<property>
<name>dfs.ha.namenodes.masters</name>
<value>master1,master2</value>
</property>
<!-- master1的RPC通信地址 -->
<property>
<name>dfs.namenode.rpc-address.masters.master1</name>
<value>master1:9000</value>
</property>
<!-- master1的http通信地址 -->
<property>
<name>dfs.namenode.http-address.masters.master1</name>
<value>master1:50070</value>
</property>
<!-- master2的RPC通信地址 -->
<property>
<name>dfs.namenode.rpc-address.masters.master2</name>
<value>master2:9000</value>
</property>
<!-- master2的http通信地址 -->
<property>
<name>dfs.namenode.http-address.masters.master2</name>
<value>master2:50070</value>
</property>
<!-- 指定NameNode的存放位置 -->
<property>
<name>dfs.namenode.name.dir</name>
<value>file:///data2/hadoop_data/hdfs/namenode</value>
</property>
<!-- 指定DataNode的存放位置 -->
<property>
<name>dfs.datanode.data.dir</name>
<value>file:///data1/hadoop_data/hdfs/datanode,file:///data2/hadoop_data/hdfs/datanode</value>
</property>
<!-- 指定NameNode的元數(shù)據(jù)在JournalNode上的存放位置 -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://slave1:8485;slave2:8485;slave3:8485/masters</value>
</property>
<!-- 指定JournalNode在本地磁盤存放數(shù)據(jù)的位置 -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/data2/hadoop_data/journal</value>
</property>
<!-- 開啟NameNode失敗自動(dòng)切換 -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<!-- 配置失敗自動(dòng)切換實(shí)現(xiàn)方式 -->
<property>
<name>dfs.client.failover.proxy.provider.masters</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- 配置隔離機(jī)制方法,多個(gè)機(jī)制用換行分割,即每個(gè)機(jī)制暫用一行-->
<property>
<name>dfs.ha.fencing.methods</name>
<value>
sshfence
shell(/bin/true)
</value>
</property>
<!-- 使用sshfence隔離機(jī)制時(shí)需要ssh免登陸 -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/home/hduser/.ssh/id_rsa</value>
</property>
<!-- 配置sshfence隔離機(jī)制超時(shí)時(shí)間 -->
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>30000</value>
</property>
<!-- 這個(gè)地方是為Hbase的專用配置,最小為4096,表示同時(shí)處理文件的上限,不配置會(huì)報(bào)錯(cuò) -->
<property>
<name>dfs.datanode.max.xcievers</name>
<value>8192</value>
</property>
<property>
<name>dfs.qjournal.write-txns.timeout.ms</name>
<value>60000</value>
</property>
</configuration>
$ vi /data1/usr/hadoop-2.7.3/etc/hadoop/yarn-site.xml
<configuration>
<!-- Site specific YARN configuration properties -->
<!-- 開啟RM高可靠 -->
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<!-- 指定RM的cluster id -->
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>RM_HA_ID</value>
</property>
<!-- 指定RM的名字 -->
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<!-- 分別指定RM的地址 -->
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>master1</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>master2</value>
</property>
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<!-- 指定zk集群地址 -->
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>master1:2181,master2:2181,slave1:2181,slave2:2181,slave3:2181</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
</configuration>
$ cp /data1/usr/hadoop-2.7.3/etc/hadoop/mapred-site.xml.template /data1/usr/hadoop-2.7.3/etc/hadoop/mapred-site.xml
$ vi /data1/usr/hadoop-2.7.3/etc/hadoop/mapred-site.xml
<configuration>
<!-- 指定mr框架為yarn方式 -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
$ vi /data1/usr/hadoop-2.7.3/etc/hadoop/slaves
slave1
slave2
slave3
$ for ip in `seq 2 5`;do scp -rpq /data1/usr/hadoop-2.7.3 192.168.11.13$ip:/data1/usr;done
各節(jié)點(diǎn)zookeeper配置
http://archive.apache.org/dist/zookeeper/zookeeper-3.4.6/zookeeper-3.4.6.tar.gz
上傳包到/data1/usr/src目錄下
創(chuàng)建目錄
$ mkdir -p /home/hduser/storage/zookeeper
$ cd /data1/usr/src
$ tar -zxf zookeeper-3.4.6.tar.gz -C /data1/usr
$ cp /data1/usr/zookeeper-3.4.6/conf/zoo_sample.cfg /data1/usr/zookeeper-3.4.6/conf/zoo.cfg
$ vi /data1/usr/zookeeper-3.4.6/conf/zoo.cfg
dataDir=/home/hduser/storage/zookeeper
server.1=master1:2888:3888
server.2=master2:2888:3888
server.3=slave1:2888:3888
server.4=slave2:2888:3888
server.5=slave3:2888:3888
master1-slave3各節(jié)點(diǎn)依次做操作
$ echo "1" > /home/hduser/storage/zookeeper/myid
$ echo "2" > /home/hduser/storage/zookeeper/myid
$ echo "3" > /home/hduser/storage/zookeeper/myid
$ echo "4" > /home/hduser/storage/zookeeper/myid
$ echo "5" > /home/hduser/storage/zookeeper/myid
$ cd /data1/usr/zookeeper-3.4.6/bin
$ ./zkServer.sh start
slave1、slave2和slave3啟動(dòng)journalnode
$ cd /data1/usr/hadoop-2.7.3
$ ./sbin/hadoop-daemon.sh start journalnode
用jps確認(rèn)啟動(dòng)結(jié)果
在master1上格式化zookeeper節(jié)點(diǎn)格式化(第一次)
$ cd /data1/usr/hadoop-2.7.3
$ ./bin/hdfs zkfc -formatZK
在master1上執(zhí)行命令:
./bin/hadoop namenode -format
在master1上啟動(dòng)namenode
./sbin/hadoop-daemon.sh start namenode
需要在master2(備節(jié)點(diǎn))上執(zhí)行數(shù)據(jù)同步
./bin/hdfs namenode -bootstrapStandby
scp -r /data2/hadoop_data/hdfs/namenode hduser@master2:/data2/hadoop_data/hdfs/
在master2上啟動(dòng)namenode
./sbin/hadoop-daemon.sh start namenode
設(shè)置master1為active
./bin/hdfs haadmin -transitionToActive master1
./bin/hdfs haadmin -getServiceState master1
在master1上啟動(dòng)datanode
./sbin/hadoop-daemons.sh start datanode
啟動(dòng)HDFS(第二次以后)
在master1上執(zhí)行命令:
./sbin/start-dfs.sh
啟動(dòng)YARN
在master1上執(zhí)行命令:
./sbin/start-yarn.sh
驗(yàn)證
驗(yàn)證namenode
http://master1:50070
Overview 'master1:9000' (active)
http://master2:50070
Overview 'master2:9000' (standby)
上傳文件
./bin/hadoop fs -put /data1/usr/hadoop-2.7.3/etc/hadoop /test
./bin/hadoop fs -ls /test
namenode的備份驗(yàn)證
殺死m(xù)aster1,master2變?yōu)閍ctive
驗(yàn)證yarn
./bin/hadoop jar /data1/usr/hadoop-2.7.3/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar wordcount /test/hadoop /test/out
安裝HBASE
master1節(jié)點(diǎn)操作:
下載hbase-1.2.4-bin.tar.gz,解壓
$ cd /data1/usr/src
$ tar -zxvf hbase-1.2.4-bin.tar.gz -C /data1/usr/
$ mkdir -p /data1/hadoop_data/hbase_tmp
$ mkdir -p /data2/hadoop_data/hbase_tmp
配置master1的hbase環(huán)境
配置hbase-env.sh
$ vi /data1/usr/hbase-1.2.4/conf/hbase-env.sh
export JAVA_HOME=/data1/usr/jdk1.7.0_79
export HBASE_PID_DIR=/data2/hadoop_data/pids
export HBASE_MANAGES_ZK=false
export HADOOP_HOME=/data1/usr/hadoop-2.7.3
將hadoop的core-site.xml和hdfs-site.xml拷貝到hbase的conf目錄,否則hbase無法解析hdfs://masters這個nameservice
$ cp /data1/usr/hadoop-2.7.3/etc/hadoop/core-site.xml /data1/usr/hadoop-2.7.3/etc/hadoop/hdfs-site.xml /data1/usr/hbase-1.2.4/conf/
配置hbase-site.xml
$ vi /data1/usr/hbase-1.2.4/conf/hbase-site.xml
<configuration>
<!-- 指定HBase在HDFS上面創建的目錄名hbase -->
<property>
<name>hbase.rootdir</name>
<value>hdfs://masters/hbase</value>
</property>
<!-- 開啟集群運(yùn)行方式 -->
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.master.port</name>
<value>60000</value>
</property>
<property>
<name>hbase.tmp.dir</name>
<value>/data2/hadoop_data/hbase_tmp</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>master1,master2,slave1,slave2,slave3</value>
</property>
</configuration>
配置regionservers
$ vi /data1/usr/hbase-1.2.4/conf/regionservers
slave1
slave2
slave3
配置backup-masters(寫入備用HMaster的主機名)
$ vi /data1/usr/hbase-1.2.4/conf/backup-masters
master2
移除 HBase 里面的不必要 log4j 的 jar 包
cd ${HBASE_HOME}/lib
mv slf4j-log4j12-1.7.5.jar slf4j-log4j12-1.7.5.jar.bak
將master1的hbase環(huán)境傳輸?shù)狡渌?jié)點(diǎn)
$ for ip in `seq 2 5`;do scp -rpq /data1/usr/hbase-1.2.4 192.168.11.13$ip:/data1/usr;done
啟動(dòng)順序
按hadoop集群的啟動(dòng)步驟,啟動(dòng)hadoop集群
master1上啟動(dòng)Hbase
$ cd /data1/usr/hbase-1.2.4/bin
$ ./start-hbase.sh
驗(yàn)證
$ /data1/usr/hadoop-2.7.3/bin/hadoop fs -ls / 查看hbase是否在HDFS文件系統(tǒng)創(chuàng)建成功
執(zhí)行: bin/hbase shell 可以進(jìn)入Hbase管理界面、
輸入 status 查看狀態(tài)
創(chuàng)建表
create 'test', 'cf'
顯示表信息
list 'test'
表中插入數(shù)據(jù)
put 'test', 'row1', 'cf:a', 'value1'
put 'test', 'row2', 'cf:b', 'value2'
put 'test', 'row3', 'cf:c', 'value3'
查詢表
scan 'test'
取一行數(shù)據(jù)
get 'test', 'row1'
失效表
disable 'test'
刪除表
drop 'test'
瀏覽器輸入http://master1:16010可以打開Hbase管理界面
http://192.168.11.131/master-status
啟動(dòng)thrift2
hbase-daemons.sh start thrift2
去datanode節(jié)點(diǎn)用jps確認(rèn)
免責(zé)聲明:本站發(fā)布的內(nèi)容(圖片、視頻和文字)以原創(chuàng)、轉(zhuǎn)載和分享為主,文章觀點(diǎn)不代表本網(wǎng)站立場(chǎng),如果涉及侵權(quán)請(qǐng)聯(lián)系站長(zhǎng)郵箱:is@yisu.com進(jìn)行舉報(bào),并提供相關(guān)證據(jù),一經(jīng)查實(shí),將立刻刪除涉嫌侵權(quán)內(nèi)容。