
How to Set Up a Single-Node Kafka Environment


This article walks through setting up a single-node Kafka environment: starting ZooKeeper, starting the Kafka broker, and then creating a topic, producing messages, and consuming them from the command line.

1. Start ZooKeeper

Running zkServer.sh with no arguments just prints its usage:

[root@node1 bin]# ./zkServer.sh 

JMX enabled by default

Using config: /opt/bigdata/zookeeper/bin/../conf/zoo.cfg

Usage: ./zkServer.sh {start|start-foreground|stop|restart|status|upgrade|print-cmd}

Start ZooKeeper in the foreground (or pass start to run it as a background daemon):

[root@node1 bin]# ./zkServer.sh start-foreground

JMX enabled by default

Using config: /opt/bigdata/zookeeper/bin/../conf/zoo.cfg
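For reference, the zoo.cfg referenced above only needs a few lines for a standalone instance. A minimal sketch (the dataDir path here is a hypothetical example):

# conf/zoo.cfg - minimal standalone ZooKeeper configuration (sketch)
tickTime=2000                          # base time unit in milliseconds
dataDir=/opt/bigdata/zookeeper/data    # snapshot directory (hypothetical path)
clientPort=2181                        # the port Kafka's zookeeper.connect points at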

2. Start the Kafka broker

With ZooKeeper running, start Kafka with the default server.properties:

[root@node1 bin]# ./kafka-server-start.sh ../config/server.properties

[2016-04-02 04:10:29,995] INFO KafkaConfig values: 

        request.timeout.ms = 30000

        log.roll.hours = 168

        inter.broker.protocol.version = 0.9.0.X

        log.preallocate = false

        security.inter.broker.protocol = PLAINTEXT

        controller.socket.timeout.ms = 30000

        broker.id.generation.enable = true

        ssl.keymanager.algorithm = SunX509

        ssl.key.password = null

        log.cleaner.enable = true

        ssl.provider = null

        num.recovery.threads.per.data.dir = 1

        background.threads = 10

        unclean.leader.election.enable = true

        sasl.kerberos.kinit.cmd = /usr/bin/kinit

        replica.lag.time.max.ms = 10000

        ssl.endpoint.identification.algorithm = null

        auto.create.topics.enable = true

        zookeeper.sync.time.ms = 2000

        ssl.client.auth = none

        ssl.keystore.password = null

        log.cleaner.io.buffer.load.factor = 0.9

        offsets.topic.compression.codec = 0

        log.retention.hours = 168

        log.dirs = /tmp/kafka-logs

        ssl.protocol = TLS

        log.index.size.max.bytes = 10485760

        sasl.kerberos.min.time.before.relogin = 60000

        log.retention.minutes = null

        connections.max.idle.ms = 600000

        ssl.trustmanager.algorithm = PKIX

        offsets.retention.minutes = 1440

        max.connections.per.ip = 2147483647

        replica.fetch.wait.max.ms = 500

        metrics.num.samples = 2

        port = 9092

        offsets.retention.check.interval.ms = 600000

        log.cleaner.dedupe.buffer.size = 134217728

        log.segment.bytes = 1073741824

        group.min.session.timeout.ms = 6000

        producer.purgatory.purge.interval.requests = 1000

        min.insync.replicas = 1

        ssl.truststore.password = null

        log.flush.scheduler.interval.ms = 9223372036854775807

        socket.receive.buffer.bytes = 102400

        leader.imbalance.per.broker.percentage = 10

        num.io.threads = 8

        zookeeper.connect = localhost:2181

        queued.max.requests = 500

        offsets.topic.replication.factor = 3

        replica.socket.timeout.ms = 30000

        offsets.topic.segment.bytes = 104857600

        replica.high.watermark.checkpoint.interval.ms = 5000

        broker.id = 0

        ssl.keystore.location = null

        listeners = PLAINTEXT://:9092

        log.flush.interval.messages = 9223372036854775807

        principal.builder.class = class org.apache.kafka.common.security.auth.DefaultPrincipalBuilder

        log.retention.ms = null

        offsets.commit.required.acks = -1

        sasl.kerberos.principal.to.local.rules = [DEFAULT]

        group.max.session.timeout.ms = 30000

        num.replica.fetchers = 1

        advertised.listeners = null

        replica.socket.receive.buffer.bytes = 65536

        delete.topic.enable = false

        log.index.interval.bytes = 4096

        metric.reporters = []

        compression.type = producer

        log.cleanup.policy = delete

        controlled.shutdown.max.retries = 3

        log.cleaner.threads = 1

        quota.window.size.seconds = 1

        zookeeper.connection.timeout.ms = 6000

        offsets.load.buffer.size = 5242880

        zookeeper.session.timeout.ms = 6000

        ssl.cipher.suites = null

        authorizer.class.name = 

        sasl.kerberos.ticket.renew.jitter = 0.05

        sasl.kerberos.service.name = null

        controlled.shutdown.enable = true

        offsets.topic.num.partitions = 50

        quota.window.num = 11

        message.max.bytes = 1000012

        log.cleaner.backoff.ms = 15000

        log.roll.jitter.hours = 0

        log.retention.check.interval.ms = 300000

        replica.fetch.max.bytes = 1048576

        log.cleaner.delete.retention.ms = 86400000

        fetch.purgatory.purge.interval.requests = 1000

        log.cleaner.min.cleanable.ratio = 0.5

        offsets.commit.timeout.ms = 5000

        zookeeper.set.acl = false

        log.retention.bytes = -1

        offset.metadata.max.bytes = 4096

        leader.imbalance.check.interval.seconds = 300

        quota.consumer.default = 9223372036854775807

        log.roll.jitter.ms = null

        reserved.broker.max.id = 1000

        replica.fetch.backoff.ms = 1000

        advertised.host.name = null

        quota.producer.default = 9223372036854775807

        log.cleaner.io.buffer.size = 524288

        controlled.shutdown.retry.backoff.ms = 5000

        log.dir = /tmp/kafka-logs

        log.flush.offset.checkpoint.interval.ms = 60000

        log.segment.delete.delay.ms = 60000

        num.partitions = 1

        num.network.threads = 3

        socket.request.max.bytes = 104857600

        sasl.kerberos.ticket.renew.window.factor = 0.8

        log.roll.ms = null

        ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]

        socket.send.buffer.bytes = 102400

        log.flush.interval.ms = null

        ssl.truststore.location = null

        log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308

        default.replication.factor = 1

        metrics.sample.window.ms = 30000

        auto.leader.rebalance.enable = true

        host.name = 

        ssl.truststore.type = JKS

        advertised.port = null

        max.connections.per.ip.overrides = 

        replica.fetch.min.bytes = 1

        ssl.keystore.type = JKS

 (kafka.server.KafkaConfig)
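The long KafkaConfig dump above confirms the defaults that matter for a single-node setup: broker.id = 0, listeners = PLAINTEXT://:9092, log.dirs = /tmp/kafka-logs, and zookeeper.connect = localhost:2181. To change any of these, edit config/server.properties before starting the broker; a minimal sketch of the lines a standalone setup typically touches:

# config/server.properties (sketch)
broker.id=0                        # must be unique per broker; 0 is fine standalone
listeners=PLAINTEXT://:9092        # plaintext listener on the default port
log.dirs=/tmp/kafka-logs           # move this off /tmp for anything durable
zookeeper.connect=localhost:2181   # must match ZooKeeper's clientPort

kafka-server-start.sh also accepts a -daemon flag if you prefer to run the broker in the background rather than holding the terminal:

[root@node1 bin]# ./kafka-server-start.sh -daemon ../config/server.properties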

3. Create a topic

Create a topic named test with one partition and a replication factor of 1:

[root@node1 bin]# ./kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test

Created topic "test".
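To verify how the new topic's partitions and replicas were assigned, kafka-topics.sh also supports --describe:

[root@node1 bin]# ./kafka-topics.sh --describe --zookeeper localhost:2181 --topic test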

4. List topics

Confirm that the topic exists:

[root@node1 bin]# ./kafka-topics.sh --list --zookeeper localhost:2181

test
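Note that the config dump above shows delete.topic.enable = false, so on this broker a --delete only marks a topic for deletion rather than removing it immediately:

[root@node1 bin]# ./kafka-topics.sh --delete --zookeeper localhost:2181 --topic test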

5. Produce messages

Start a console producer and type a few lines; each line is sent as one message:

[root@node1 bin]# ./kafka-console-producer.sh --broker-list localhost:9092 --topic test

fhgfhfgh\

gjgjhgjg

gjhgjkghk

nvnbv
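Each line typed here is sent as a separate, unkeyed message. To experiment with keyed messages (keys determine partition routing), the console producer's line reader can split a key off each line; this assumes the parse.key and key.separator reader properties are available in your version:

[root@node1 bin]# ./kafka-console-producer.sh --broker-list localhost:9092 --topic test --property parse.key=true --property key.separator=: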

6. Consume messages

In another terminal, start a console consumer and read the topic from the beginning:

[root@node1 ~]# cd /opt/bigdata/kafka/bin/

[root@node1 bin]# ./kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning

fhgfhfgh\

gjgjhgjg

gjhgjkghk

nvnbv
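The --zookeeper flag selects the old consumer, which is appropriate for this 0.9 broker. On newer Kafka releases the console consumer connects to the brokers directly instead:

[root@node1 bin]# ./kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test --from-beginning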

This concludes the walkthrough of setting up a single-node Kafka environment. Pairing the theory with hands-on practice is the best way to make it stick, so try these steps yourself.
