
HadoopHA 3: Configuration Files for Hadoop High Availability

Preface

This article covers the configuration files used for a Hadoop HA (high availability) setup.
For the installation steps, see:
Installing a Windows + Linux dual-boot system: https://blog.csdn.net/IAmListening/article/details/89741854
Setting up a time-sync server and a yum repository: https://blog.csdn.net/IAmListening/article/details/89765148
Setting up Hadoop HA: https://blog.csdn.net/IAmListening/article/details/89765843
Installing Hive and related exceptions: https://blog.csdn.net/IAmListening/article/details/89368823
Installing HBase: https://blog.csdn.net/IAmListening/article/details/89765957
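
All of the files below live in Hadoop's configuration directory. As a minimal sketch of the editing workflow (assuming Hadoop 2.x is unpacked under /software/hadoop, which this guide does not state explicitly; adjust to your install path):

# Assumed install path; Hadoop 2.x keeps its config files under etc/hadoop
cd /software/hadoop/etc/hadoop
ls hadoop-env.sh core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml slaves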

vi hadoop-env.sh

# Set JAVA_HOME explicitly
export JAVA_HOME=/apps/jdk1.8.0_60
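
Before moving on, it is worth confirming that the path really points at a JDK (a quick sanity check):

# Should print the JDK version if the configured JAVA_HOME is correct
/apps/jdk1.8.0_60/bin/java -version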

vi core-site.xml

<configuration>

  <!-- Logical name of the HDFS nameservice (must match dfs.nameservices in hdfs-site.xml) -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://bg19</value>
  </property>

  <!-- ZooKeeper quorum used by the ZKFC for automatic failover -->
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>slave1:2181,slave2:2181,slave3:2181</value>
  </property>

  <!-- Base directory for Hadoop temporary files -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/software/hadoopdata/tmp</value>
  </property>

  <!-- Disable HDFS permission checking (convenient on a test cluster) -->
  <property>
    <name>dfs.permissions.enabled</name>
    <value>false</value>
  </property>

  <!-- Minutes a deleted file is kept in the trash before being purged -->
  <property>
    <name>fs.trash.interval</name>
    <value>36000000</value>
  </property>

  <!-- Minutes between trash checkpoints -->
  <property>
    <name>fs.trash.checkpoint.interval</name>
    <value>3600</value>
  </property>

</configuration>
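
Once the file is in place (and HDFS is running), a quick way to confirm the values are being picked up is hdfs getconf; a sketch:

# Print the effective value of a key from the loaded configuration
hdfs getconf -confKey fs.defaultFS
hdfs getconf -confKey ha.zookeeper.quorum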

vi hdfs-site.xml

<configuration>

  <!-- Logical name of the nameservice (matches fs.defaultFS in core-site.xml) -->
  <property>
    <name>dfs.nameservices</name>
    <value>bg19</value>
  </property>

  <!-- The two NameNodes that make up the HA pair -->
  <property>
    <name>dfs.ha.namenodes.bg19</name>
    <value>nn1,nn2</value>
  </property>

  <!-- RPC and HTTP addresses of nn1 -->
  <property>
    <name>dfs.namenode.rpc-address.bg19.nn1</name>
    <value>master1:9000</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.bg19.nn1</name>
    <value>master1:50070</value>
  </property>

  <!-- RPC and HTTP addresses of nn2 -->
  <property>
    <name>dfs.namenode.rpc-address.bg19.nn2</name>
    <value>master2:9000</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.bg19.nn2</name>
    <value>master2:50070</value>
  </property>

  <!-- Local storage directories for DataNode blocks and NameNode metadata -->
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>/software/hadoop/data</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>/software/hadoop/name</value>
  </property>

  <!-- JournalNode quorum that stores the shared edit log -->
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://slave1:8485;slave2:8485;slave3:8485/bg19</value>
  </property>
  <!-- Local directory where each JournalNode keeps its edits -->
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/software/hadoop/journaldata</value>
  </property>

  <!-- Enable automatic failover via the ZKFC -->
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <!-- Proxy provider HDFS clients use to locate the active NameNode -->
  <property>
    <name>dfs.client.failover.proxy.provider.bg19</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <!-- Fencing methods: try sshfence first, fall back to a no-op shell -->
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>
      sshfence
      shell(/bin/true)
    </value>
  </property>
  <!-- Private key used by sshfence -->
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
  </property>
  <!-- Timeout (ms) for the sshfence SSH connection -->
  <property>
    <name>dfs.ha.fencing.ssh.connect-timeout</name>
    <value>30000</value>
  </property>

</configuration>
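
With hdfs-site.xml in place, the first-time HA bring-up has to follow a fixed order: JournalNodes first, then format/bootstrap the NameNodes, then initialize the failover state in ZooKeeper. A sketch of the typical sequence (host names as used in this guide; see the linked installation post for the full steps):

# On slave1, slave2, slave3: start the JournalNodes first
hadoop-daemon.sh start journalnode

# On master1: format the first NameNode and start it
hdfs namenode -format
hadoop-daemon.sh start namenode

# On master2: copy the metadata from the first NameNode
hdfs namenode -bootstrapStandby

# On master1 (once): create the failover znode in ZooKeeper
hdfs zkfc -formatZK

# Then start the whole HDFS layer (NameNodes, DataNodes, JournalNodes, ZKFCs)
start-dfs.sh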

vi mapred-site.xml

<configuration>

  <!-- Run MapReduce jobs on YARN -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>

</configuration>
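
To verify that MapReduce jobs really run on YARN, the bundled examples jar is enough (a sketch; the jar path and version vary with your Hadoop install):

# Submit the pi example; it should show up as an application in the YARN web UI (port 8088)
hadoop jar /software/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar pi 2 10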

vi yarn-site.xml

<configuration>

  <!-- Auxiliary service needed for the MapReduce shuffle -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>

  <!-- Enable ResourceManager HA -->
  <property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
  </property>

  <!-- Cluster id the ResourceManagers use to elect an active instance in ZooKeeper -->
  <property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>bg_hyarn</value>
  </property>

  <!-- Id of the ResourceManager running on this machine: rm1 on master2, rm2 on master1 (set per host) -->
  <property>
    <name>yarn.resourcemanager.ha.id</name>
    <value>rm1</value>
  </property>
  <!-- The two ResourceManagers that make up the HA pair -->
  <property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>master2</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>master1</value>
  </property>

  <!-- HTTP and HTTPS web UI addresses of the two ResourceManagers -->
  <property>
    <name>yarn.resourcemanager.webapp.address.rm1</name>
    <value>master2:8088</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.https.address.rm1</name>
    <value>master2:8090</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address.rm2</name>
    <value>master1:8088</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.https.address.rm2</name>
    <value>master1:8090</value>
  </property>

  <!-- ZooKeeper quorum the ResourceManagers use for state storage and leader election -->
  <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>slave1:2181,slave2:2181,slave3:2181</value>
  </property>

</configuration>
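
After both ResourceManagers are started, yarn rmadmin reports which one is active (a quick check):

# One should report "active", the other "standby"
yarn rmadmin -getServiceState rm1
yarn rmadmin -getServiceState rm2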

vi slaves

slave1
slave2
slave3
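
The configuration directory has to be identical on every node. A sketch of pushing it out with scp (host names as used above; assumes passwordless SSH and the same install path everywhere):

# Run from the node where the files were edited (e.g. master1)
for host in master2 slave1 slave2 slave3; do
  scp -r /software/hadoop/etc/hadoop/ root@$host:/software/hadoop/etc/
done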
