General broker settings
# Zookeeper quorum connection string
zookeeperServers=192.168.33.3:2181,192.168.33.4:2181,192.168.33.5:2181
# Configuration Store connection string
configurationStoreServers=192.168.33.3:2181,192.168.33.4:2181,192.168.33.5:2181
# Broker data port
brokerServicePort=6650
# Broker data port for TLS - By default TLS is disabled
brokerServicePortTls=
# Port to use to serve HTTP requests
webServicePort=8081
# Port to use to serve HTTPS requests - By default TLS is disabled
webServicePortTls=
# Hostname or IP address the service binds on, default is 0.0.0.0.
bindAddress=0.0.0.0
# Hostname or IP address the service advertises to the outside world. If not set, the value of InetAddress.getLocalHost().getHostName() is used.
advertisedAddress=192.168.33.5
# Used to specify multiple advertised listeners for the broker.
# Each value must be formatted as <listener_name>:pulsar://<host>:<port>,
# and multiple listeners should be separated with commas.
# Do not use this configuration with advertisedAddress and brokerServicePort.
# By default this value is absent, meaning advertisedAddress and brokerServicePort are used.
# advertisedListeners=
# Used to specify the internal listener name for the broker.
# The listener name must be contained in advertisedListeners.
# By default this value is absent; the broker uses the first listener as the internal listener.
# internalListenerName=
# Number of threads to use for Netty IO. Default is set to 2 * Runtime.getRuntime().availableProcessors()
numIOThreads=
# Number of threads to use for the ordered executor. The ordered executor is used to operate with zookeeper,
# such as initializing the zookeeper client, getting namespace policies from zookeeper etc. It is also used to split bundles. Default is 8
numOrderedExecutorThreads=8
# Number of threads to use for HTTP request processing. Default is set to 2 * Runtime.getRuntime().availableProcessors()
numHttpServerThreads=
# Size of the thread pool for the pulsar broker service.
# Executors in this pool perform basic broker operations like load/unload bundle, update managedLedgerConfig,
# update topic/subscription/replicator message dispatch rates, leader election etc.
# Default is Runtime.getRuntime().availableProcessors()
numExecutorThreadPoolSize=
# Size of the thread pool for the pulsar zookeeper callback service.
# The cache executor thread pool is used for restarting the global zookeeper session.
# Default is 10
numCacheExecutorThreadPoolSize=10
# Max concurrent web requests
maxConcurrentHttpRequests=1024
# Flag to control features that are meant to be used when running in standalone mode
isRunningStandalone=
# Name of the cluster to which this broker belongs
clusterName=pulsar-cluster
# The maximum number of tenants that each pulsar cluster can create
# This configuration is not a precise control; in a concurrent scenario the threshold may be exceeded
maxTenants=0
# Enable the cluster's failure domains, which can distribute brokers into logical regions
failureDomainsEnabled=false
# Zookeeper session timeout in milliseconds
zooKeeperSessionTimeoutMillis=30000
# ZooKeeper operation timeout in seconds
zooKeeperOperationTimeoutSeconds=30
# ZooKeeper cache expiry time in seconds
zooKeeperCacheExpirySeconds=300
# Time to wait for broker graceful shutdown. After this time elapses, the process will be killed
brokerShutdownTimeoutMs=60000
# Flag to skip broker shutdown when the broker handles an out-of-memory error
skipBrokerShutdownOnOOM=false
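
Before moving on to the quota settings, it may help to see the ports above from a client's perspective. Below is a minimal connectivity sketch using the Pulsar Java client, assuming the pulsar-client library is on the classpath and the cluster above is reachable.

import org.apache.pulsar.client.api.PulsarClient;

public class ConnectExample {
    public static void main(String[] args) throws Exception {
        // Binary protocol endpoint: advertisedAddress + brokerServicePort from above
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://192.168.33.5:6650")
                .build();
        // The HTTP endpoint (admin API, lookups) would be http://192.168.33.5:8081 (webServicePort)
        client.close();
    }
}
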
# Enable backlog quota check. Enforces action on a topic when the quota is reached
backlogQuotaCheckEnabled=true
# How often to check for topics that have reached the quota
backlogQuotaCheckIntervalInSeconds=60
# Default per-topic backlog quota limit; less than 0 means no limitation. Default is -1.
backlogQuotaDefaultLimitGB=-1
# Default backlog quota retention policy. Default is producer_request_hold
# 'producer_request_hold' policy holds the producer's send request until the resource becomes available (or holding times out)
# 'producer_exception' policy throws javax.jms.ResourceAllocationException to the producer
# 'consumer_backlog_eviction' policy evicts the oldest message from the slowest consumer's backlog
backlogQuotaDefaultRetentionPolicy=producer_request_hold
# Default ttl for namespaces if ttl is not already configured in namespace policies. (disable default-ttl with value 0)
ttlDurationDefaultInSeconds=0
# Enable topic auto creation when a new producer or consumer connects (disable auto creation with value false)
allowAutoTopicCreation=true
# The type of topic that is allowed to be automatically created (partitioned/non-partitioned)
allowAutoTopicCreationType=non-partitioned
# Enable subscription auto creation when a new consumer connects (disable auto creation with value false)
allowAutoSubscriptionCreation=true
# The number of partitions for partitioned topics that are automatically created, if allowAutoTopicCreationType is partitioned
defaultNumPartitions=1
# Enable the deletion of inactive topics
brokerDeleteInactiveTopicsEnabled=true
# How often to check for inactive topics
brokerDeleteInactiveTopicsFrequencySeconds=60
# Set the inactive topic delete mode. Default is delete_when_no_subscriptions
# 'delete_when_no_subscriptions' mode only deletes topics that have no subscriptions and no active producers
# 'delete_when_subscriptions_caught_up' mode only deletes topics where all subscriptions have no backlog (caught up)
# and there are no active producers/consumers
brokerDeleteInactiveTopicsMode=delete_when_no_subscriptions
# Metadata of inactive partitioned topics is not cleaned up automatically by default.
# Note: If `allowAutoTopicCreation` and this option are enabled at the same time,
# it may appear that a partitioned topic has just been deleted but is automatically created as a non-partitioned topic.
brokerDeleteInactivePartitionedTopicMetadataEnabled=false
# Max duration of topic inactivity in seconds; default is not present
# If not present, 'brokerDeleteInactiveTopicsFrequencySeconds' will be used
# Topics that are inactive for longer than this value will be deleted
brokerDeleteInactiveTopicsMaxInactiveDurationSeconds=
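
The backlog quota defaults above can be overridden per namespace through the admin API. A hedged sketch with the Pulsar Java admin client follows; the namespace name and the 10 MB limit are illustrative, and the two-argument BacklogQuota constructor shown matches the 2.x admin API (newer versions use a builder).

import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.common.policies.data.BacklogQuota;

public class BacklogQuotaExample {
    public static void main(String[] args) throws Exception {
        PulsarAdmin admin = PulsarAdmin.builder()
                .serviceHttpUrl("http://192.168.33.5:8081") // webServicePort from above
                .build();
        // Hold producer send requests once this namespace's backlog exceeds 10 MB,
        // mirroring backlogQuotaDefaultRetentionPolicy=producer_request_hold
        admin.namespaces().setBacklogQuota("public/default",
                new BacklogQuota(10 * 1024 * 1024,
                        BacklogQuota.RetentionPolicy.producer_request_hold));
        admin.close();
    }
}
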
# Max pending publish requests per connection, to avoid keeping a large number of pending
# requests in memory. Default: 1000
maxPendingPublishdRequestsPerConnection=1000
# How frequently to proactively check and purge expired messages
messageExpiryCheckIntervalInMinutes=5
# How long to delay rewinding the cursor and dispatching messages when the active consumer is changed
activeConsumerFailoverDelayTimeMillis=1000
# How long until inactive subscriptions are deleted, measured from the last consumption
# When it is 0, inactive subscriptions are not deleted automatically
subscriptionExpirationTimeMinutes=0
# Enable the subscription message redelivery tracker to send the redelivery count to the consumer (default is enabled)
subscriptionRedeliveryTrackerEnabled=true
# How frequently to proactively check and purge expired subscriptions
subscriptionExpiryCheckIntervalInMinutes=5
# Enable Key_Shared subscription (default is enabled)
subscriptionKeySharedEnable=true
# On KeyShared subscriptions, with default AUTO_SPLIT mode, use splitting ranges or
# consistent hashing to reassign keys to new consumers
subscriptionKeySharedUseConsistentHashing=false
# On KeyShared subscriptions, number of points in the consistent-hashing ring.
# The higher the number, the more equal the assignment of keys to consumers
subscriptionKeySharedConsistentHashingReplicaPoints=100
# Set the default behavior for message deduplication in the broker
# This can be overridden per-namespace. If enabled, the broker will reject
# messages that were already stored in the topic
brokerDeduplicationEnabled=false
# Maximum number of producers whose information is going to be
# persisted for deduplication purposes
brokerDeduplicationMaxNumberOfProducers=10000
# How often the thread pool is scheduled to check whether a snapshot needs to be taken (disable with value 0)
brokerDeduplicationSnapshotFrequencyInSeconds=10
# If this time interval is exceeded, a snapshot will be taken.
# It runs simultaneously with `brokerDeduplicationEntriesInterval`
brokerDeduplicationSnapshotIntervalSeconds=120
# Number of entries after which a dedup info snapshot is taken.
# A larger interval will lead to fewer snapshots being taken, though it would
# increase the topic recovery time when the entries published after the
# snapshot need to be replayed.
brokerDeduplicationEntriesInterval=1000
# Time of inactivity after which the broker will discard the deduplication information
# relating to a disconnected producer. Default is 6 hours.
brokerDeduplicationProducerInactivityTimeoutMinutes=360
# When a namespace is created without specifying the number of bundles, this
# value will be used as the default
defaultNumberOfNamespaceBundles=4
# The maximum number of namespaces that each tenant can create
# This configuration is not a precise control; in a concurrent scenario the threshold may be exceeded
maxNamespacesPerTenant=0
# Enable check for minimum allowed client library version
clientLibraryVersionCheckEnabled=false
# Path for the file used to determine the rotation status for the broker when responding
# to service discovery health checks
statusFilePath=
# If true (and ModularLoadManagerImpl is being used), the load manager will attempt to
# use only brokers running the latest software version (to minimize impact to bundles)
preferLaterVersions=false
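
On the client side, the Key_Shared settings above come into play when consumers subscribe with the Key_Shared subscription type. A minimal sketch with the Pulsar Java client (topic and subscription names are placeholders):

import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.Message;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.SubscriptionType;

public class KeySharedExample {
    public static void main(String[] args) throws Exception {
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://192.168.33.5:6650")
                .build();
        // Consumers sharing this subscription each receive a disjoint range of keys;
        // with subscriptionKeySharedUseConsistentHashing=true, the assignment uses the
        // hashing ring sized by subscriptionKeySharedConsistentHashingReplicaPoints
        Consumer<byte[]> consumer = client.newConsumer()
                .topic("persistent://public/default/orders") // placeholder topic
                .subscriptionName("orders-key-shared")
                .subscriptionType(SubscriptionType.Key_Shared)
                .subscribe();
        Message<byte[]> msg = consumer.receive();
        consumer.acknowledge(msg);
        client.close();
    }
}
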
# Max number of unacknowledged messages allowed for a consumer on a shared subscription. The broker will stop sending
# messages to a consumer once this limit is reached, until the consumer starts acknowledging messages back.
# Using a value of 0 disables the unackedMessage limit check and the consumer can receive messages without any restriction
maxUnackedMessagesPerConsumer=50000
# Max number of unacknowledged messages allowed per shared subscription. The broker will stop dispatching messages to
# all consumers of the subscription once this limit is reached, until consumers start acknowledging messages back and
# the unack count drops to limit/2. Using a value of 0 disables the unackedMessage-limit
# check and the dispatcher can dispatch messages without any restriction
maxUnackedMessagesPerSubscription=200000
# Max number of unacknowledged messages allowed per broker. Once this limit is reached, the broker will stop dispatching
# messages to all shared subscriptions that have a higher number of unacked messages, until subscriptions start
# acknowledging messages back and the unack count drops to limit/2. Using a value of 0 disables the
# unackedMessage-limit check and the broker doesn't block dispatchers
maxUnackedMessagesPerBroker=0
# Once the broker reaches the maxUnackedMessagesPerBroker limit, it blocks subscriptions that have more unacked messages
# than this percentage limit; a blocked subscription will not receive any new messages until it acks back
# limit/2 messages
maxUnackedMessagesPerSubscriptionOnBrokerBlocked=0.16
# Tick time for the scheduled task that checks topic publish rate limiting across all topics
# Reducing it to a lower value can give more accuracy while throttling publishing, but
# it uses more CPU to perform frequent checks. (Disable publish throttling with value 0)
topicPublisherThrottlingTickTimeMillis=10
# Tick time for the scheduled task that checks broker publish rate limiting across all topics
# Reducing it to a lower value can give more accuracy while throttling publishing, but
# it uses more CPU to perform frequent checks. (Disable publish throttling with value 0)
brokerPublisherThrottlingTickTimeMillis=50
# Max rate (in messages per second) allowed to publish on a broker, if broker publish rate limiting is enabled
# (Disable message rate limit with value 0)
brokerPublisherThrottlingMaxMessageRate=0
# Max rate (in bytes per second) allowed to publish on a broker, if broker publish rate limiting is enabled.
# (Disable byte rate limit with value 0)
brokerPublisherThrottlingMaxByteRate=0
# Max rate (in messages per second) allowed to publish on a topic, if topic publish rate limiting is enabled
# (Disable message rate limit with value 0)
maxPublishRatePerTopicInMessages=0
# Max rate (in bytes per second) allowed to publish on a topic, if topic publish rate limiting is enabled.
# (Disable byte rate limit with value 0)
maxPublishRatePerTopicInBytes=0
# Too many subscribe requests from a consumer can cause the broker to rewind consumer cursors and load data from bookies,
# causing high network bandwidth usage
# When a positive value is set, the broker will throttle the subscribe requests for each consumer.
# Otherwise, the throttling is disabled. The default value of this setting is 0 - throttling is disabled.
subscribeThrottlingRatePerConsumer=0
# Rate period for {subscribeThrottlingRatePerConsumer}. Default is 30s.
subscribeRatePeriodPerConsumerInSecond=30
# Default messages-per-second dispatch throttling limit for every topic. Using a value of 0 disables the default
# message dispatch throttling
dispatchThrottlingRatePerTopicInMsg=0
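
Like the publish-rate limits, the dispatch-throttling defaults here can be overridden per namespace. A hedged sketch using the Java admin client; the three-argument DispatchRate constructor matches the 2.x admin API (newer versions use a builder), and the rates shown are illustrative.

import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.common.policies.data.DispatchRate;

public class DispatchRateExample {
    public static void main(String[] args) throws Exception {
        PulsarAdmin admin = PulsarAdmin.builder()
                .serviceHttpUrl("http://192.168.33.5:8081")
                .build();
        // Cap dispatch for topics in this namespace at 1000 msg/s and 1 MB/s,
        // evaluated over a 1-second rate period
        admin.namespaces().setDispatchRate("public/default",
                new DispatchRate(1000, 1024 * 1024, 1));
        admin.close();
    }
}
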
# Default bytes-per-second dispatch throttling limit for every topic. Using a value of 0 disables the
# default message-byte dispatch throttling
dispatchThrottlingRatePerTopicInByte=0
# Default messages-per-second dispatch throttling limit for a subscription.
# Using a value of 0 disables the default message dispatch throttling.
dispatchThrottlingRatePerSubscriptionInMsg=0
# Default bytes-per-second dispatch throttling limit for a subscription.
# Using a value of 0 disables the default message-byte dispatch throttling.
dispatchThrottlingRatePerSubscriptionInByte=0
# Default messages-per-second dispatch throttling limit for every replicator in replication.
# Using a value of 0 disables replication message dispatch throttling
dispatchThrottlingRatePerReplicatorInMsg=0
# Default bytes-per-second dispatch throttling limit for every replicator in replication.
# Using a value of 0 disables replication message-byte dispatch throttling
dispatchThrottlingRatePerReplicatorInByte=0
# Dispatch rate limiting relative to the publish rate.
# (Enabling this flag makes the broker dynamically update the dispatch rate relative to the publish rate:
# throttle-dispatch-rate = publish-rate + configured dispatch-rate)
dispatchThrottlingRateRelativeToPublishRate=false
# By default, dispatch throttling is enabled for both caught-up consumers and consumers that have
# a backlog.
dispatchThrottlingOnNonBacklogConsumerEnabled=true
# Max number of entries to read from bookkeeper. By default it is 100 entries.
dispatcherMaxReadBatchSize=100
# Max size in bytes of entries to read from bookkeeper. By default it is 5MB.
dispatcherMaxReadSizeBytes=5242880
# Min number of entries to read from bookkeeper. By default it is 1 entry.
# When an error occurs while reading entries from bookkeeper, the broker
# will back off the batch size to this minimum number.
dispatcherMinReadBatchSize=1
# Max number of entries to dispatch for a shared subscription. By default it is 20 entries.
dispatcherMaxRoundRobinBatchSize=20
# Precise dispatcher flow control according to the history message number of each entry
preciseDispatcherFlowControl=false
# Max number of concurrent lookup requests the broker allows, to throttle heavy incoming lookup traffic
maxConcurrentLookupRequest=50000
# Max number of concurrent topic loading requests the broker allows, to control the number of zk operations
maxConcurrentTopicLoadRequest=5000
# Max concurrent non-persistent messages that can be processed per connection
maxConcurrentNonPersistentMessagePerConnection=1000
# Number of worker threads to serve non-persistent topics
numWorkerThreadsForNonPersistentTopic=8
# Enable broker to load persistent topics
enablePersistentTopics=true
# Enable broker to load non-persistent topics
enableNonPersistentTopics=true
# Enable running a bookie along with the broker
enableRunBookieTogether=false
# Enable running bookie autorecovery along with the broker
enableRunBookieAutoRecoveryTogether=false
# Max number of producers allowed to connect to a topic. Once this limit is reached, the broker will reject new producers
# until the number of connected producers decreases.
# Using a value of 0 disables the maxProducersPerTopic limit check.
maxProducersPerTopic=0
# Enforce producers to publish encrypted messages (default disabled)
encryptionRequireOnProducer=false
# Max number of consumers allowed to connect to a topic. Once this limit is reached, the broker will reject new consumers
# until the number of connected consumers decreases.
# Using a value of 0 disables the maxConsumersPerTopic limit check.
maxConsumersPerTopic=0
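
Since enableNonPersistentTopics is on above, clients can produce to non-persistent topics as well; only the topic-name prefix changes on the client side. A minimal sketch (topic name is a placeholder):

import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.Schema;

public class NonPersistentProducerExample {
    public static void main(String[] args) throws Exception {
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://192.168.33.5:6650")
                .build();
        // Non-persistent topics bypass BookKeeper entirely: messages are never written to disk
        Producer<String> producer = client.newProducer(Schema.STRING)
                .topic("non-persistent://public/default/metrics") // placeholder topic
                .create();
        producer.send("hello");
        client.close();
    }
}
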
# Max number of subscriptions allowed on a topic. Once this limit is reached, the broker will reject
# new subscriptions until the number of subscriptions decreases.
# Using a value of 0 disables the maxSubscriptionsPerTopic limit check.
maxSubscriptionsPerTopic=0
# Max number of consumers allowed to connect to a subscription. Once this limit is reached, the broker will reject new consumers
# until the number of connected consumers decreases.
# Using a value of 0 disables the maxConsumersPerSubscription limit check.
maxConsumersPerSubscription=0
# Max size of messages.
maxMessageSize=5242880
# Interval between checks to see if topics with compaction policies need to be compacted
brokerServiceCompactionMonitorIntervalInSeconds=60
# Whether to enable delayed delivery for messages.
# If disabled, messages will be delivered immediately and there will
# be no tracking overhead.
delayedDeliveryEnabled=true
# Control the tick time for retries on delayed delivery,
# affecting the accuracy of the delivery time relative to the scheduled time.
# Default is 1 second.
delayedDeliveryTickTimeMillis=1000
# Whether to enable acknowledgment at the batch index level.
acknowledgmentAtBatchIndexLevelEnabled=false
# Enable tracking of replicated subscriptions state across clusters.
enableReplicatedSubscriptions=true
# Frequency of snapshots for replicated subscriptions tracking.
replicatedSubscriptionsSnapshotFrequencyMillis=1000
# Timeout for building a consistent snapshot for tracking replicated subscriptions state.
replicatedSubscriptionsSnapshotTimeoutSeconds=30
# Max number of snapshots to be cached per subscription.
replicatedSubscriptionsSnapshotMaxCachedPerSubscription=10
# Max memory size for the broker to handle messages being sent from producers.
# If the size of in-process messages exceeds this value, the broker will stop reading data
# from the connection. In-process messages are messages that have been sent to the broker
# but for which the broker has not yet sent a response to the client, usually because they are waiting to be written to bookies.
# It's shared across all the topics running in the same broker.
# Use -1 to disable the memory limitation. Default is 1/2 of direct memory.
maxMessagePublishBufferSizeInMB=
# Interval between checks to see if the message publish buffer size exceeds the max message publish buffer size
# Use 0 or a negative number to disable the max publish buffer limiting.
messagePublishBufferCheckIntervalInMillis=100
# Interval between checks to see if consumed ledgers need to be trimmed
# Use 0 or a negative number to disable the check
retentionCheckIntervalInSeconds=120
# Max number of partitions per partitioned topic
# Use 0 or a negative number to disable the check
maxNumPartitionsPerPartitionedTopic=0
# There are two policies for when a zookeeper session expires: "shutdown" and "reconnect".
# With the "shutdown" policy, the broker is shut down when the zookeeper session expires.
# With the "reconnect" policy, the broker tries to reconnect to the zookeeper server and re-register its metadata with zookeeper.
# Note: the "reconnect" policy is an experimental feature
zookeeperSessionExpiredPolicy=shutdown
# Enable or disable system topics
systemTopicEnabled=false
# Enable or disable topic-level policies; topic-level policies depend on the system topic,
# so please enable the system topic first.
topicLevelPoliciesEnabled=false
# If a topic remains fenced for this number of seconds, it will be closed forcefully.
# If it is set to 0 or a negative number, the fenced topic will not be closed.
topicFencingTimeoutSeconds=0
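
The delayed-delivery settings above have a direct client-side counterpart: a producer can schedule individual messages for later delivery, and delayedDeliveryTickTimeMillis bounds how precisely the broker honors that schedule for shared subscriptions. A minimal sketch:

import java.util.concurrent.TimeUnit;

import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.Schema;

public class DelayedDeliveryExample {
    public static void main(String[] args) throws Exception {
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://192.168.33.5:6650")
                .build();
        Producer<String> producer = client.newProducer(Schema.STRING)
                .topic("persistent://public/default/reminders") // placeholder topic
                .create();
        // Deliver roughly 5 minutes from now (applies to Shared/Key_Shared subscriptions)
        producer.newMessage()
                .value("check order status")
                .deliverAfter(5, TimeUnit.MINUTES)
                .send();
        client.close();
    }
}
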
### --- Authentication --- ###
# Role names that are treated as "proxy roles". If the broker sees a request with
# a role listed in proxyRoles, it will demand to see a valid original principal.
proxyRoles=
# If this flag is set, the broker authenticates the original auth data;
# otherwise it just accepts the originalPrincipal and authorizes it (if required).
authenticateOriginalAuthData=false
# Deprecated - Use webServicePortTls and brokerServicePortTls instead
tlsEnabled=false
# TLS cert refresh duration in seconds (set to 0 to check on every new connection)
tlsCertRefreshCheckDurationSec=300
# Path for the TLS certificate file
tlsCertificateFilePath=
# Path for the TLS private key file
tlsKeyFilePath=
# Path for the trusted TLS certificate file.
# This cert is used to verify that any certs presented by connecting clients
# are signed by a certificate authority. If this verification
# fails, then the certs are untrusted and the connections are dropped.
tlsTrustCertsFilePath=
# Accept untrusted TLS certificates from clients.
# If true, a client with a cert which cannot be verified with the
# 'tlsTrustCertsFilePath' cert will be allowed to connect to the server,
# though the cert will not be used for client authentication.
tlsAllowInsecureConnection=false
# Specify the TLS protocols the broker will use to negotiate during the TLS handshake
# (a comma-separated list of protocol names).
# Example: [TLSv1.2, TLSv1.1, TLSv1]
tlsProtocols=
# Specify the TLS ciphers the broker will use to negotiate during the TLS handshake
# (a comma-separated list of ciphers).
# Example: [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256]
tlsCiphers=
# Trusted client certificates are required to connect with TLS;
# reject the connection if the client certificate is not trusted.
# In effect, this requires that all connecting clients perform TLS client
# authentication.
tlsRequireTrustedClientCertOnConnect=false

### --- KeyStore TLS config variables --- ###
# Enable TLS with KeyStore type configuration in broker.
tlsEnabledWithKeyStore=false
# TLS Provider for KeyStore type
tlsProvider=
# TLS KeyStore type configuration in broker: JKS, PKCS12
tlsKeyStoreType=JKS
# TLS KeyStore path in broker
tlsKeyStore=
# TLS KeyStore password for broker
tlsKeyStorePassword=
# TLS TrustStore type configuration in broker: JKS, PKCS12
tlsTrustStoreType=JKS
# TLS TrustStore path in broker
tlsTrustStore=
# TLS TrustStore password in broker
tlsTrustStorePassword=
# Whether the internal client uses the KeyStore type to authenticate with Pulsar brokers
brokerClientTlsEnabledWithKeyStore=false
# The TLS Provider used by the internal client to authenticate with other Pulsar brokers
brokerClientSslProvider=
# TLS TrustStore type configuration for the internal client: JKS, PKCS12,
# used by the internal client to authenticate with Pulsar brokers
brokerClientTlsTrustStoreType=JKS
# TLS TrustStore path for the internal client,
# used by the internal client to authenticate with Pulsar brokers
brokerClientTlsTrustStore=
# TLS TrustStore password for the internal client,
# used by the internal client to authenticate with Pulsar brokers
brokerClientTlsTrustStorePassword=
# Specify the TLS ciphers the internal client will use to negotiate during the TLS handshake
# (a comma-separated list of ciphers),
# e.g. [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256],
# used by the internal client to authenticate with Pulsar brokers
brokerClientTlsCiphers=
# Specify the TLS protocols the internal client will use to negotiate during the TLS handshake
# (a comma-separated list of protocol names),
# e.g. [TLSv1.2, TLSv1.1, TLSv1],
# used by the internal client to authenticate with Pulsar brokers
brokerClientTlsProtocols=
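
Once brokerServicePortTls is set and the certificate paths above are filled in, clients connect over the pulsar+ssl scheme. A hedged sketch with the Java client; the TLS port 6651 and the CA path are placeholders that must match the broker's actual TLS configuration.

import org.apache.pulsar.client.api.PulsarClient;

public class TlsClientExample {
    public static void main(String[] args) throws Exception {
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar+ssl://192.168.33.5:6651")  // placeholder TLS port
                .tlsTrustCertsFilePath("/path/to/ca.cert.pem") // placeholder CA cert
                .allowTlsInsecureConnection(false)             // client-side analogue of tlsAllowInsecureConnection
                .build();
        client.close();
    }
}
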
Authentication
Token Authentication Provider
SASL Authentication Provider
BookKeeper Client
### --- BookKeeper Client --- ###
# Metadata service uri that bookkeeper uses for loading the corresponding metadata driver
# and resolving its metadata service location.
# This value can be fetched using the `bookkeeper shell whatisinstanceid` command in the BookKeeper cluster.
# For example: zk+hierarchical://localhost:2181/ledgers
# The metadata service uri list can also be semicolon-separated values like below:
# zk+hierarchical://zk1:2181;zk2:2181;zk3:2181/ledgers
bookkeeperMetadataServiceUri=
# Authentication plugin to use when connecting to bookies
bookkeeperClientAuthenticationPlugin=
# BookKeeper auth plugin implementation-specific parameter name and values
bookkeeperClientAuthenticationParametersName=
bookkeeperClientAuthenticationParameters=
# Timeout for BK add / read operations
bookkeeperClientTimeoutInSeconds=30
# Speculative reads are initiated if a read request doesn't complete within a certain time
# Using a value of 0 disables speculative reads
bookkeeperClientSpeculativeReadTimeoutInMillis=0
# Number of channels per bookie
bookkeeperNumberOfChannelsPerBookie=16
# Use the older BookKeeper wire protocol with bookies
bookkeeperUseV2WireProtocol=true
# Enable bookie health checks. Bookies that have more than the configured number of failures within
# the interval will be quarantined for some time. During this period, new ledgers won't be created
# on these bookies
bookkeeperClientHealthCheckEnabled=true
bookkeeperClientHealthCheckIntervalSeconds=60
bookkeeperClientHealthCheckErrorThresholdPerInterval=5
bookkeeperClientHealthCheckQuarantineTimeInSeconds=1800
# Bookie quarantine ratio, to avoid all clients quarantining high-pressure bookie servers at the same time
bookkeeperClientQuarantineRatio=1.0
# Specify options for the GetBookieInfo check. These settings can be useful
# to help ensure the list of bookies is up to date on the brokers.
bookkeeperGetBookieInfoIntervalSeconds=86400
bookkeeperGetBookieInfoRetryIntervalSeconds=60
# Enable the rack-aware bookie selection policy. BK will choose bookies from different racks when
# forming a new bookie ensemble
# This parameter is related to ensemblePlacementPolicy in conf/bookkeeper.conf; if enabled, ensemblePlacementPolicy
# should be set to org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicy
bookkeeperClientRackawarePolicyEnabled=true
# Enable the region-aware bookie selection policy. BK will choose bookies from
# different regions and racks when forming a new bookie ensemble
# If enabled, the value of bookkeeperClientRackawarePolicyEnabled is ignored
# This parameter is related to ensemblePlacementPolicy in conf/bookkeeper.conf; if enabled, ensemblePlacementPolicy
# should be set to org.apache.bookkeeper.client.RegionAwareEnsemblePlacementPolicy
bookkeeperClientRegionawarePolicyEnabled=false
# Minimum number of racks per write quorum. The BK rack-aware bookie selection policy will try to
# get bookies from at least 'bookkeeperClientMinNumRacksPerWriteQuorum' racks for a write quorum.
bookkeeperClientMinNumRacksPerWriteQuorum=2
# Enforces the rack-aware bookie selection policy to pick bookies from 'bookkeeperClientMinNumRacksPerWriteQuorum'
# racks for a writeQuorum.
# If BK can't find such bookies, it will throw BKNotEnoughBookiesException instead of picking a random one.
bookkeeperClientEnforceMinNumRacksPerWriteQuorum=false
# Enable/disable reordering the read sequence on reading entries.
bookkeeperClientReorderReadSequenceEnabled=false
# Enable bookie isolation by specifying a list of bookie groups to choose from. Any bookie
# outside the specified groups will not be used by the broker
bookkeeperClientIsolationGroups=
# Enable a bookie secondary-isolation group if bookkeeperClientIsolationGroups doesn't
# have enough bookies available.
bookkeeperClientSecondaryIsolationGroups=
# Minimum number of bookies that should be available as part of bookkeeperClientIsolationGroups,
# else the broker will include bookkeeperClientSecondaryIsolationGroups bookies in the isolated list.
bookkeeperClientMinAvailableBookiesInIsolationGroups=
# Enable/disable having read operations for a ledger be sticky to a single bookie.
# If this flag is enabled, the client will use one single bookie (by preference) to read
# all entries for a ledger.
#
# Disable sticky reads until {@link https://github.com/apache/bookkeeper/issues/1970} is fixed
bookkeeperEnableStickyReads=false
# Set the client security provider factory class name.
# Default: org.apache.bookkeeper.tls.TLSContextFactory
bookkeeperTLSProviderFactoryClass=org.apache.bookkeeper.tls.TLSContextFactory
# Enable TLS authentication with bookies
bookkeeperTLSClientAuthentication=false
# Supported types: PEM, JKS, PKCS12. Default value: PEM
bookkeeperTLSKeyFileType=PEM
# Supported types: PEM, JKS, PKCS12. Default value: PEM
bookkeeperTLSTrustCertTypes=PEM
# Path to the file containing the keystore password, if the client keystore is password protected.
bookkeeperTLSKeyStorePasswordPath=
# Path to the file containing the truststore password, if the client truststore is password protected.
bookkeeperTLSTrustStorePasswordPath=
# Path for the TLS private key file
bookkeeperTLSKeyFilePath=
# Path for the TLS certificate file
bookkeeperTLSCertificateFilePath=
# Path for the trusted TLS certificate file
bookkeeperTLSTrustCertsFilePath=
# Enable/disable disk-weight-based placement. Default is false
bookkeeperDiskWeightBasedPlacementEnabled=false
# Set the interval to check the need for sending an explicit LAC
# A value of '0' disables sending any explicit LACs. Default is 0.
bookkeeperExplicitLacIntervalInMills=0
# Expose bookkeeper client managed ledger stats to prometheus. Default is false
bookkeeperClientExposeStatsToPrometheus=true
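
For the rack-aware policy above to have any effect, each bookie needs rack information in the cluster metadata, which can be set through the admin API. A hedged sketch; the bookie address, group, rack, and hostname are placeholders, and BookieInfo.builder() is from newer admin APIs (older versions construct BookieInfo differently).

import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.common.policies.data.BookieInfo;

public class BookieRackExample {
    public static void main(String[] args) throws Exception {
        PulsarAdmin admin = PulsarAdmin.builder()
                .serviceHttpUrl("http://192.168.33.5:8081")
                .build();
        // Record that this bookie lives in rack-1, so the rack-aware placement
        // policy can spread write quorums across racks
        admin.bookies().updateBookieRackInfo("192.168.33.6:3181", "default",
                BookieInfo.builder()
                        .rack("rack-1")
                        .hostname("bookie1.example.com") // placeholder
                        .build());
        admin.close();
    }
}
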
Managed Ledger
# Number of bookies to use when creating a ledger
managedLedgerDefaultEnsembleSize=2
# Number of copies to store for each message
managedLedgerDefaultWriteQuorum=2
# Number of guaranteed copies (acks to wait for before a write is considered complete)
managedLedgerDefaultAckQuorum=2
# How frequently to flush the cursor positions that were accumulated due to rate limiting (seconds).
# Default is 60 seconds
managedLedgerCursorPositionFlushSeconds=60
# Default type of checksum to use when writing to BookKeeper. Default is "CRC32C"
# Other possible options are "CRC32", "MAC" or "DUMMY" (no checksum).
managedLedgerDigestType=CRC32C
# Number of threads to be used for managed ledger task dispatching
managedLedgerNumWorkerThreads=8
# Number of threads to be used for managed ledger scheduled tasks
managedLedgerNumSchedulerThreads=8
# Amount of memory to use for caching data payloads in managed ledger. This memory
# is allocated from JVM direct memory and it's shared across all the topics
# running in the same broker. By default, uses 1/5th of the available direct memory
managedLedgerCacheSizeMB=
# Whether we should make a copy of the entry payloads when inserting into the cache
managedLedgerCacheCopyEntries=false
# Threshold to which the cache level is brought down when eviction is triggered
managedLedgerCacheEvictionWatermark=0.9
# Configure the cache eviction frequency for the managed ledger cache (evictions/sec)
managedLedgerCacheEvictionFrequency=100.0
# All entries that have stayed in the cache for more than the configured time will be evicted
managedLedgerCacheEvictionTimeThresholdMillis=1000
# Configure the threshold (in number of entries) from which a cursor should be considered 'backlogged'
# and thus should be set as inactive.
managedLedgerCursorBackloggedThreshold=1000
# Rate limit the amount of writes per second generated by consumers acking messages
managedLedgerDefaultMarkDeleteRateLimit=1.0
# Max number of entries to append to a ledger before triggering a rollover
# A ledger rollover is triggered on these conditions:
# * Either the max rollover time has been reached
# * or max entries have been written to the ledger and at least min-time
#   has passed
managedLedgerMaxEntriesPerLedger=50000
# Minimum time between ledger rollovers for a topic
managedLedgerMinLedgerRolloverTimeMinutes=10
# Maximum time before forcing a ledger rollover for a topic
managedLedgerMaxLedgerRolloverTimeMinutes=240
# Maximum ledger size before triggering a rollover for a topic (MB)
managedLedgerMaxSizePerLedgerMbytes=2048
# Delay between a ledger being successfully offloaded to long-term storage
# and the ledger being deleted from bookkeeper (default is 4 hours)
managedLedgerOffloadDeletionLagMs=14400000
# The number of bytes before triggering automatic offload to long-term storage
# (default is -1, which is disabled)
managedLedgerOffloadAutoTriggerSizeThresholdBytes=-1
# Max number of entries to append to a cursor ledger
managedLedgerCursorMaxEntriesPerLedger=50000
# Max time before triggering a rollover on a cursor ledger
managedLedgerCursorRolloverTimeInSeconds=14400
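
The ensemble/write/ack quorum defaults above apply only when a namespace has no persistence policy of its own; a per-namespace override looks like the following hedged sketch. The PersistencePolicies constructor takes the ensemble size, write quorum, ack quorum, and a mark-delete rate corresponding to managedLedgerDefaultMarkDeleteRateLimit.

import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.common.policies.data.PersistencePolicies;

public class PersistenceExample {
    public static void main(String[] args) throws Exception {
        PulsarAdmin admin = PulsarAdmin.builder()
                .serviceHttpUrl("http://192.168.33.5:8081")
                .build();
        // Spread each ledger across 3 bookies, store 2 copies of each entry,
        // wait for 2 acks, and throttle mark-delete writes to 1.0 op/s
        admin.namespaces().setPersistence("public/default",
                new PersistencePolicies(3, 2, 2, 1.0));
        admin.close();
    }
}
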
# Max number of "acknowledgment holes" that are going to be persistently stored.
# When acknowledging out of order, a consumer will leave holes that are supposed
# to be quickly filled by acking all the messages. The information about which
# messages are acknowledged is persisted by compressing it into "ranges" of
# acknowledged messages. After the max number of ranges is reached, the information
# will only be tracked in memory and messages will be redelivered in case of
# crashes.
managedLedgerMaxUnackedRangesToPersist=10000
# Max number of "acknowledgment holes" that can be stored in Zookeeper. If the number of unacked message ranges is higher
# than this limit, the broker will persist unacked ranges into bookkeeper to avoid additional data overhead in
# zookeeper.
managedLedgerMaxUnackedRangesToPersistInZooKeeper=1000
# Skip reading non-recoverable/unreadable data ledgers in a managed ledger's list. It helps when data ledgers get
# corrupted in bookkeeper and a managed cursor is stuck at that ledger.
autoSkipNonRecoverableData=false
# Whether to recover cursors lazily when trying to recover a managed ledger backing a persistent topic.
# It can improve the write availability of topics.
# The caveat is that when a recovered ledger is ready for writes, we're not sure whether every old consumer's last
# mark-delete position can be recovered or not.
lazyCursorRecovery=false
# Operation timeout while updating managed-ledger metadata.
managedLedgerMetadataOperationsTimeoutSeconds=60
# Read entries timeout when the broker tries to read messages from bookkeeper.
managedLedgerReadEntryTimeoutSeconds=0
# Add entry timeout when the broker tries to publish a message to bookkeeper (0 to disable it).
managedLedgerAddEntryTimeoutSeconds=0
# Managed ledger prometheus stats latency rollover seconds (default: 60s)
managedLedgerPrometheusStatsLatencyRolloverSeconds=60
# Whether to trace managed ledger task execution time
managedLedgerTraceTaskExecution=true
# New-entries check delay for cursors under the managed ledger.
# If there are no new messages in the topic, the cursor will try to check again after the delay time.
# For consumption-latency-sensitive scenarios, this can be set to a smaller value or to 0.
# Of course, using a smaller value may degrade consumption throughput. Default is 10ms.
managedLedgerNewEntriesCheckDelayInMillis=10
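
To make the "acknowledgment holes" described above concrete: holes appear when a consumer acknowledges individually and out of order, while cumulative acknowledgment never creates them. A minimal sketch of the two acknowledgment styles (topic and subscription names are placeholders; cumulative acks are only valid on Exclusive/Failover subscriptions):

import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.Message;
import org.apache.pulsar.client.api.PulsarClient;

public class AckStylesExample {
    public static void main(String[] args) throws Exception {
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://192.168.33.5:6650")
                .build();
        Consumer<byte[]> consumer = client.newConsumer()
                .topic("persistent://public/default/events") // placeholder topic
                .subscriptionName("events-sub")
                .subscribe();

        Message<byte[]> first = consumer.receive();
        Message<byte[]> second = consumer.receive();

        // Acking only the second message leaves a "hole" at the first, which the
        // broker tracks as one of the unacked ranges bounded by the settings above
        consumer.acknowledge(second);

        // A cumulative ack marks everything up to (and including) this message as
        // consumed, closing any holes behind it
        consumer.acknowledgeCumulative(second);

        client.close();
    }
}
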