General broker settings

  # Zookeeper quorum connection string
  zookeeperServers=192.168.33.3:2181,192.168.33.4:2181,192.168.33.5:2181
  # Configuration Store connection string
  configurationStoreServers=192.168.33.3:2181,192.168.33.4:2181,192.168.33.5:2181
  # Broker data port
  brokerServicePort=6650
  # Broker data port for TLS - By default TLS is disabled
  brokerServicePortTls=
  # Port to use to serve HTTP requests
  webServicePort=8081
  # Port to use to serve HTTPS requests - By default TLS is disabled
  webServicePortTls=
  # Hostname or IP address the service binds on, default is 0.0.0.0.
  bindAddress=0.0.0.0
  # Hostname or IP address the service advertises to the outside world. If not set, the value of InetAddress.getLocalHost().getHostName() is used.
  advertisedAddress=192.168.33.5
  # Used to specify multiple advertised listeners for the broker.
  # The value must be in the format <listener_name>:pulsar://<host>:<port>;
  # multiple listeners are separated with commas.
  # Do not use this configuration together with advertisedAddress and brokerServicePort.
  # The default value is absent, which means advertisedAddress and brokerServicePort are used.
  # advertisedListeners=
  # Used to specify the internal listener name for the broker.
  # The listener name must be one of the names defined in advertisedListeners.
  # The default value is absent; the broker uses the first listener as the internal listener.
  # internalListenerName=
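  # Illustrative example only (the listener names and the external IP below are assumptions, not values from this deployment):
  # advertisedListeners=internal:pulsar://192.168.33.5:6650,external:pulsar://203.0.113.10:6650
  # internalListenerName=internal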
  # Number of threads to use for Netty IO. Default is set to 2 * Runtime.getRuntime().availableProcessors()
  numIOThreads=
  # Number of threads to use for the ordered executor. The ordered executor is used to operate with zookeeper,
  # such as init zookeeper client, get namespace policies from zookeeper etc. It is also used to split bundles. Default is 8
  numOrderedExecutorThreads=8
  # Number of threads to use for HTTP requests processing. Default is set to 2 * Runtime.getRuntime().availableProcessors()
  numHttpServerThreads=
  # Size of the thread pool to use for the pulsar broker service.
  # The executors in this thread pool perform basic broker operations like load/unload bundle, update managedLedgerConfig,
  # update topic/subscription/replicator message dispatch rate, do leader election etc.
  # Default is Runtime.getRuntime().availableProcessors()
  numExecutorThreadPoolSize=
  # Size of the thread pool to use for the pulsar zookeeper callback service
  # The cache executor thread pool is used for restarting the global zookeeper session.
  # Default is 10
  numCacheExecutorThreadPoolSize=10
  # Max concurrent web requests
  maxConcurrentHttpRequests=1024
  # Flag to control features that are meant to be used when running in standalone mode
  isRunningStandalone=
  # Name of the cluster this broker belongs to
  clusterName=pulsar-cluster
  # The maximum number of tenants that each pulsar cluster can create
  # This limit is not enforced precisely; in a concurrent scenario the threshold may be exceeded
  maxTenants=0
  # Enable the cluster's failure domain, which can distribute brokers into logical regions
  failureDomainsEnabled=false
  # Zookeeper session timeout in milliseconds
  zooKeeperSessionTimeoutMillis=30000
  # ZooKeeper operation timeout in seconds
  zooKeeperOperationTimeoutSeconds=30
  # ZooKeeper cache expiry time in seconds
  zooKeeperCacheExpirySeconds=300
  # Time to wait for broker graceful shutdown. After this time elapses, the process will be killed
  brokerShutdownTimeoutMs=60000
  # Flag to skip broker shutdown when the broker handles an out-of-memory error
  skipBrokerShutdownOnOOM=false
  # Enable backlog quota check. Enforces action on the topic when the quota is reached
  backlogQuotaCheckEnabled=true
  # How often to check for topics that have reached the quota
  backlogQuotaCheckIntervalInSeconds=60
  # Default per-topic backlog quota limit; a value less than 0 means no limit. Default is -1.
  backlogQuotaDefaultLimitGB=-1
  # Default backlog quota retention policy. Default is producer_request_hold
  # 'producer_request_hold' Policy which holds the producer's send request until the resource becomes available (or holding times out)
  # 'producer_exception' Policy which throws javax.jms.ResourceAllocationException to the producer
  # 'consumer_backlog_eviction' Policy which evicts the oldest message from the slowest consumer's backlog
  backlogQuotaDefaultRetentionPolicy=producer_request_hold
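  # Illustrative example only (values are assumptions): cap each topic's backlog at 10 GB and evict the oldest
  # messages from the slowest consumer's backlog once the quota is reached:
  # backlogQuotaDefaultLimitGB=10
  # backlogQuotaDefaultRetentionPolicy=consumer_backlog_eviction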
  # Default TTL for namespaces if a TTL is not already configured in the namespace policies. (Disable the default TTL with value 0)
  ttlDurationDefaultInSeconds=0
  # Enable topic auto-creation when a new producer or consumer connects (disable auto-creation with value false)
  allowAutoTopicCreation=true
  # The type of topic that is allowed to be automatically created. (partitioned/non-partitioned)
  allowAutoTopicCreationType=non-partitioned
  # Enable subscription auto-creation when a new consumer connects (disable auto-creation with value false)
  allowAutoSubscriptionCreation=true
  # The default number of partitions for topics that are automatically created when allowAutoTopicCreationType is partitioned.
  defaultNumPartitions=1
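  # Illustrative example only (values are assumptions): auto-create partitioned topics with 3 partitions each:
  # allowAutoTopicCreationType=partitioned
  # defaultNumPartitions=3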
  # Enable the deletion of inactive topics
  brokerDeleteInactiveTopicsEnabled=true
  # How often to check for inactive topics
  brokerDeleteInactiveTopicsFrequencySeconds=60
  # Set the inactive topic delete mode. Default is delete_when_no_subscriptions
  # 'delete_when_no_subscriptions' mode only deletes topics that have no subscriptions and no active producers
  # 'delete_when_subscriptions_caught_up' mode only deletes topics whose subscriptions all have no backlog (caught up)
  # and that have no active producers/consumers
  brokerDeleteInactiveTopicsMode=delete_when_no_subscriptions
  # Metadata of inactive partitioned topics is not cleaned up automatically by default.
  # Note: If `allowAutoTopicCreation` and this option are enabled at the same time,
  # it may appear that a partitioned topic has just been deleted but is automatically re-created as a non-partitioned topic.
  brokerDeleteInactivePartitionedTopicMetadataEnabled=false
  # Max duration of topic inactivity in seconds; not set by default
  # If not set, 'brokerDeleteInactiveTopicsFrequencySeconds' will be used
  # Topics that are inactive for longer than this value will be deleted
  brokerDeleteInactiveTopicsMaxInactiveDurationSeconds=
  # Max pending publish requests per connection, to avoid keeping a large number of pending
  # requests in memory. Default: 1000
  maxPendingPublishdRequestsPerConnection=1000
  # How frequently to proactively check and purge expired messages
  messageExpiryCheckIntervalInMinutes=5
  # How long to delay rewinding the cursor and dispatching messages when the active consumer is changed
  activeConsumerFailoverDelayTimeMillis=1000
  # How long after the last consumption before an inactive subscription is deleted
  # When it is 0, inactive subscriptions are not deleted automatically
  subscriptionExpirationTimeMinutes=0
  # Enable the subscription message redelivery tracker to send the redelivery count to the consumer (default is enabled)
  subscriptionRedeliveryTrackerEnabled=true
  # How frequently to proactively check and purge expired subscriptions
  subscriptionExpiryCheckIntervalInMinutes=5
  # Enable Key_Shared subscription (default is enabled)
  subscriptionKeySharedEnable=true
  # On Key_Shared subscriptions, with the default AUTO_SPLIT mode, use splitting ranges or
  # consistent hashing to reassign keys to new consumers
  subscriptionKeySharedUseConsistentHashing=false
  # On Key_Shared subscriptions, number of points in the consistent-hashing ring.
  # The higher the number, the more even the assignment of keys to consumers
  subscriptionKeySharedConsistentHashingReplicaPoints=100
  # Set the default behavior for message deduplication in the broker
  # This can be overridden per-namespace. If enabled, the broker will reject
  # messages that were already stored in the topic
  brokerDeduplicationEnabled=false
  # Maximum number of producers whose information will be
  # persisted for deduplication purposes
  brokerDeduplicationMaxNumberOfProducers=10000
  # How often the thread pool is scheduled to check whether a snapshot needs to be taken. (Disable with value 0)
  brokerDeduplicationSnapshotFrequencyInSeconds=10
  # If this time interval is exceeded, a snapshot will be taken.
  # It runs simultaneously with `brokerDeduplicationEntriesInterval`
  brokerDeduplicationSnapshotIntervalSeconds=120
  # Number of entries after which a dedup info snapshot is taken.
  # A larger interval will lead to fewer snapshots being taken, though it would
  # increase the topic recovery time when the entries published after the
  # snapshot need to be replayed.
  brokerDeduplicationEntriesInterval=1000
  # Time of inactivity after which the broker will discard the deduplication information
  # relative to a disconnected producer. Default is 6 hours.
  brokerDeduplicationProducerInactivityTimeoutMinutes=360
  # When a namespace is created without specifying the number of bundles, this
  # value will be used as the default
  defaultNumberOfNamespaceBundles=4
  # The maximum number of namespaces that each tenant can create
  # This limit is not enforced precisely; in a concurrent scenario the threshold may be exceeded
  maxNamespacesPerTenant=0
  # Enable check for minimum allowed client library version
  clientLibraryVersionCheckEnabled=false
  # Path for the file used to determine the rotation status for the broker when responding
  # to service discovery health checks
  statusFilePath=
  # If true (and ModularLoadManagerImpl is being used), the load manager will attempt to
  # use only brokers running the latest software version (to minimize impact to bundles)
  preferLaterVersions=false
  # Max number of unacknowledged messages allowed for a consumer on a shared subscription. The broker stops sending
  # messages to a consumer once this limit is reached, until the consumer starts acknowledging messages back.
  # A value of 0 disables the unacked-message limit check and consumers can receive messages without any restriction
  maxUnackedMessagesPerConsumer=50000
  # Max number of unacknowledged messages allowed per shared subscription. The broker stops dispatching messages to
  # all consumers of the subscription once this limit is reached, until consumers start acknowledging messages back and
  # the unacked count drops to limit/2. A value of 0 disables the unacked-message limit
  # check and the dispatcher can dispatch messages without any restriction
  maxUnackedMessagesPerSubscription=200000
  # Max number of unacknowledged messages allowed per broker. Once this limit is reached, the broker stops dispatching
  # messages to all shared subscriptions which have a higher number of unacked messages, until subscriptions start
  # acknowledging messages back and the unacked count drops to limit/2. A value of 0 disables the
  # unacked-message limit check and the broker doesn't block dispatchers
  maxUnackedMessagesPerBroker=0
  # Once the broker reaches the maxUnackedMessagesPerBroker limit, it blocks subscriptions which have more unacked messages
  # than this percentage limit, and those subscriptions will not receive any new messages until they ack back
  # limit/2 messages
  maxUnackedMessagesPerSubscriptionOnBrokerBlocked=0.16
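  # Illustrative arithmetic only (the broker limit below is an assumption): with maxUnackedMessagesPerBroker=100000
  # and the 0.16 ratio above, once the broker holds 100000 unacked messages it blocks any shared subscription
  # carrying more than 100000 * 0.16 = 16000 unacked messages until that subscription acks messages back.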
  # Tick time to schedule the task that checks topic publish rate limiting across all topics
  # Reducing this to a lower value can give more accuracy while throttling publishing, but
  # it uses more CPU to perform the frequent checks. (Disable publish throttling with value 0)
  topicPublisherThrottlingTickTimeMillis=10
  # Tick time to schedule the task that checks broker publish rate limiting across all topics
  # Reducing this to a lower value can give more accuracy while throttling publishing, but
  # it uses more CPU to perform the frequent checks. (Disable publish throttling with value 0)
  brokerPublisherThrottlingTickTimeMillis=50
  # Max rate (per second) of messages allowed to be published for a broker if broker publish rate limiting is enabled
  # (Disable message rate limit with value 0)
  brokerPublisherThrottlingMaxMessageRate=0
  # Max rate (per second) of bytes allowed to be published for a broker if broker publish rate limiting is enabled.
  # (Disable byte rate limit with value 0)
  brokerPublisherThrottlingMaxByteRate=0
  # Max rate (per second) of messages allowed to be published for a topic if topic publish rate limiting is enabled
  # (Disable message rate limit with value 0)
  maxPublishRatePerTopicInMessages=0
  # Max rate (per second) of bytes allowed to be published for a topic if topic publish rate limiting is enabled.
  # (Disable byte rate limit with value 0)
  maxPublishRatePerTopicInBytes=0
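  # Illustrative example only (values are assumptions): throttle every topic to 1000 messages/s and 1 MB/s (1048576 bytes/s):
  # maxPublishRatePerTopicInMessages=1000
  # maxPublishRatePerTopicInBytes=1048576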
  # Too many subscribe requests from a consumer can cause the broker to rewind consumer cursors and load data from bookies,
  # hence causing high network bandwidth usage
  # When a positive value is set, the broker will throttle the subscribe requests for one consumer.
  # Otherwise, the throttling will be disabled. The default value of this setting is 0 - throttling is disabled.
  subscribeThrottlingRatePerConsumer=0
  # Rate period for {subscribeThrottlingRatePerConsumer}. Default is 30s.
  subscribeRatePeriodPerConsumerInSecond=30
  # Default messages-per-second dispatch throttling limit for every topic. A value of 0 disables the default
  # message dispatch throttling
  dispatchThrottlingRatePerTopicInMsg=0
  # Default bytes-per-second dispatch throttling limit for every topic. A value of 0 disables the
  # default message-byte dispatch throttling
  dispatchThrottlingRatePerTopicInByte=0
  # Default message dispatch throttling limit for a subscription.
  # A value of 0 disables the default message dispatch throttling.
  dispatchThrottlingRatePerSubscriptionInMsg=0
  # Default message-byte dispatch throttling limit for a subscription.
  # A value of 0 disables the default message-byte dispatch throttling.
  dispatchThrottlingRatePerSubscriptionInByte=0
  # Default messages-per-second dispatch throttling limit for every replicator in replication.
  # A value of 0 disables replication message dispatch throttling
  dispatchThrottlingRatePerReplicatorInMsg=0
  # Default bytes-per-second dispatch throttling limit for every replicator in replication.
  # A value of 0 disables replication message-byte dispatch throttling
  dispatchThrottlingRatePerReplicatorInByte=0
  # Dispatch rate limiting relative to publish rate.
  # (Enabling this flag makes the broker dynamically update the dispatch rate relative to the publish rate:
  # throttle-dispatch-rate = publish-rate + configured dispatch-rate)
  dispatchThrottlingRateRelativeToPublishRate=false
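  # Illustrative arithmetic only (rates are assumptions): with this flag enabled, a topic publishing 1000 msg/s
  # with a configured dispatch rate of 500 msg/s would be allowed to dispatch up to 1000 + 500 = 1500 msg/s.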
  # By default we enable dispatch throttling both for caught-up consumers and for consumers that have
  # a backlog.
  dispatchThrottlingOnNonBacklogConsumerEnabled=true
  # Max number of entries to read from bookkeeper. By default it is 100 entries.
  dispatcherMaxReadBatchSize=100
  # Max size in bytes of entries to read from bookkeeper. By default it is 5MB.
  dispatcherMaxReadSizeBytes=5242880
  # Min number of entries to read from bookkeeper. By default it is 1 entry.
  # When an error occurs while reading entries from bookkeeper, the broker
  # will back off the batch size to this minimum number.
  dispatcherMinReadBatchSize=1
  # Max number of entries to dispatch for a shared subscription. By default it is 20 entries.
  dispatcherMaxRoundRobinBatchSize=20
  # Precise dispatcher flow control according to the historical message count of each entry
  preciseDispatcherFlowControl=false
  # Max number of concurrent lookup requests the broker allows, to throttle heavy incoming lookup traffic
  maxConcurrentLookupRequest=50000
  # Max number of concurrent topic loading requests the broker allows, to control the number of zk-operations
  maxConcurrentTopicLoadRequest=5000
  # Max number of concurrent non-persistent messages that can be processed per connection
  maxConcurrentNonPersistentMessagePerConnection=1000
  # Number of worker threads to serve non-persistent topics
  numWorkerThreadsForNonPersistentTopic=8
  # Enable broker to load persistent topics
  enablePersistentTopics=true
  # Enable broker to load non-persistent topics
  enableNonPersistentTopics=true
  # Enable running a bookie along with the broker
  enableRunBookieTogether=false
  # Enable running bookie autorecovery along with the broker
  enableRunBookieAutoRecoveryTogether=false
  # Max number of producers allowed to connect to a topic. Once this limit is reached, the broker rejects new producers
  # until the number of connected producers decreases.
  # A value of 0 disables the maxProducersPerTopic limit check.
  maxProducersPerTopic=0
  # Enforce producers to publish encrypted messages. (Default: disabled)
  encryptionRequireOnProducer=false
  # Max number of consumers allowed to connect to a topic. Once this limit is reached, the broker rejects new consumers
  # until the number of connected consumers decreases.
  # A value of 0 disables the maxConsumersPerTopic limit check.
  maxConsumersPerTopic=0
  # Max number of subscriptions allowed to subscribe to a topic. Once this limit is reached, the broker rejects
  # new subscriptions until the number of subscriptions decreases.
  # A value of 0 disables the maxSubscriptionsPerTopic limit check.
  maxSubscriptionsPerTopic=0
  # Max number of consumers allowed to connect to a subscription. Once this limit is reached, the broker rejects new consumers
  # until the number of connected consumers decreases.
  # A value of 0 disables the maxConsumersPerSubscription limit check.
  maxConsumersPerSubscription=0
  # Max size of messages.
  maxMessageSize=5242880
  # Interval between checks to see if topics with compaction policies need to be compacted
  brokerServiceCompactionMonitorIntervalInSeconds=60
  # Whether to enable delayed delivery for messages.
  # If disabled, messages will be delivered immediately and there will
  # be no tracking overhead.
  delayedDeliveryEnabled=true
  # Control the tick time for retrying delayed delivery,
  # affecting the accuracy of the delivery time compared to the scheduled time.
  # Default is 1 second.
  delayedDeliveryTickTimeMillis=1000
  # Whether to enable acknowledgment at the batch index level.
  acknowledgmentAtBatchIndexLevelEnabled=false
  # Enable tracking of replicated subscriptions state across clusters.
  enableReplicatedSubscriptions=true
  # Frequency of snapshots for replicated subscriptions tracking.
  replicatedSubscriptionsSnapshotFrequencyMillis=1000
  # Timeout for building a consistent snapshot for tracking replicated subscriptions state.
  replicatedSubscriptionsSnapshotTimeoutSeconds=30
  # Max number of snapshots to be cached per subscription.
  replicatedSubscriptionsSnapshotMaxCachedPerSubscription=10
  # Max memory size for the broker to handle messages being sent from producers.
  # If the size of in-flight messages exceeds this value, the broker stops reading data
  # from the connection. In-flight messages are messages that have been sent to the broker
  # but for which the broker has not yet sent a response to the client, usually because they are waiting to be written to bookies.
  # The limit is shared across all the topics running in the same broker.
  # Use -1 to disable the memory limitation. Default is 1/2 of direct memory.
  maxMessagePublishBufferSizeInMB=
  # Interval between checks to see if the message publish buffer size exceeds the max message publish buffer size
  # Use 0 or a negative number to disable the max publish buffer limiting.
  messagePublishBufferCheckIntervalInMillis=100
  # Interval between checks to see if consumed ledgers need to be trimmed
  # Use 0 or a negative number to disable the check
  retentionCheckIntervalInSeconds=120
  # Max number of partitions per partitioned topic
  # Use 0 or a negative number to disable the check
  maxNumPartitionsPerPartitionedTopic=0
  # There are two policies for when a zookeeper session expiry happens: "shutdown" and "reconnect".
  # With the "shutdown" policy, the broker is shut down when the zookeeper session expires.
  # With the "reconnect" policy, the broker tries to reconnect to the zookeeper server and re-register its metadata in zookeeper.
  # Note: the "reconnect" policy is an experimental feature
  zookeeperSessionExpiredPolicy=shutdown
  # Enable or disable system topics
  systemTopicEnabled=false
  # Enable or disable topic-level policies. Topic-level policies depend on the system topic,
  # so please enable the system topic first.
  topicLevelPoliciesEnabled=false
  # If a topic remains fenced for this number of seconds, it will be closed forcefully.
  # If it is set to 0 or a negative number, the fenced topic will not be closed.
  topicFencingTimeoutSeconds=0
  ### --- Authentication --- ###
  # Role names that are treated as "proxy roles". If the broker sees a request with
  # a role listed in proxyRoles, it will demand to see a valid original principal.
  proxyRoles=
  # If this flag is set, the broker authenticates the original auth data;
  # else it just accepts the originalPrincipal and authorizes it (if required).
  authenticateOriginalAuthData=false
  # Deprecated - Use webServicePortTls and brokerServicePortTls instead
  tlsEnabled=false
  # TLS cert refresh duration in seconds (set 0 to check on every new connection)
  tlsCertRefreshCheckDurationSec=300
  # Path for the TLS certificate file
  tlsCertificateFilePath=
  # Path for the TLS private key file
  tlsKeyFilePath=
  # Path for the trusted TLS certificate file.
  # This cert is used to verify that any certs presented by connecting clients
  # are signed by a certificate authority. If this verification
  # fails, then the certs are untrusted and the connections are dropped.
  tlsTrustCertsFilePath=
  # Accept untrusted TLS certificates from clients.
  # If true, a client with a cert which cannot be verified with the
  # 'tlsTrustCertsFilePath' cert will be allowed to connect to the server,
  # though the cert will not be used for client authentication.
  tlsAllowInsecureConnection=false
  # Specify the TLS protocols the broker will use to negotiate during the TLS handshake
  # (a comma-separated list of protocol names).
  # Examples: [TLSv1.2, TLSv1.1, TLSv1]
  tlsProtocols=
  # Specify the TLS ciphers the broker will use to negotiate during the TLS handshake
  # (a comma-separated list of ciphers).
  # Examples: [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256]
  tlsCiphers=
  # Trusted client certificates are required to connect over TLS.
  # Reject the connection if the client certificate is not trusted.
  # In effect, this requires that all connecting clients perform TLS client
  # authentication.
  tlsRequireTrustedClientCertOnConnect=false
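  # Illustrative example only (the ports and file paths are assumptions, not values from this deployment):
  # to serve TLS alongside the plaintext ports, something like the following could be set:
  # brokerServicePortTls=6651
  # webServicePortTls=8443
  # tlsCertificateFilePath=/opt/pulsar/certs/broker.cert.pem
  # tlsKeyFilePath=/opt/pulsar/certs/broker.key-pk8.pem
  # tlsTrustCertsFilePath=/opt/pulsar/certs/ca.cert.pem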
  ### --- KeyStore TLS config variables --- ###
  # Enable TLS with KeyStore type configuration in the broker.
  tlsEnabledWithKeyStore=false
  # TLS Provider for KeyStore type
  tlsProvider=
  # TLS KeyStore type configuration in the broker: JKS, PKCS12
  tlsKeyStoreType=JKS
  # TLS KeyStore path in the broker
  tlsKeyStore=
  # TLS KeyStore password for the broker
  tlsKeyStorePassword=
  # TLS TrustStore type configuration in the broker: JKS, PKCS12
  tlsTrustStoreType=JKS
  # TLS TrustStore path in the broker
  tlsTrustStore=
  # TLS TrustStore password in the broker
  tlsTrustStorePassword=
  # Whether the internal client uses the KeyStore type to authenticate with Pulsar brokers
  brokerClientTlsEnabledWithKeyStore=false
  # The TLS Provider used by the internal client to authenticate with other Pulsar brokers
  brokerClientSslProvider=
  # TLS TrustStore type configuration for the internal client: JKS, PKCS12
  # used by the internal client to authenticate with Pulsar brokers
  brokerClientTlsTrustStoreType=JKS
  # TLS TrustStore path for the internal client,
  # used by the internal client to authenticate with Pulsar brokers
  brokerClientTlsTrustStore=
  # TLS TrustStore password for the internal client,
  # used by the internal client to authenticate with Pulsar brokers
  brokerClientTlsTrustStorePassword=
  # Specify the TLS ciphers the internal client will use to negotiate during the TLS handshake
  # (a comma-separated list of ciphers),
  # e.g. [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256],
  # used by the internal client to authenticate with Pulsar brokers
  brokerClientTlsCiphers=
  # Specify the TLS protocols the internal client will use to negotiate during the TLS handshake
  # (a comma-separated list of protocol names),
  # e.g. [TLSv1.2, TLSv1.1, TLSv1],
  # used by the internal client to authenticate with Pulsar brokers
  brokerClientTlsProtocols=

Authentication

Token Authentication Provider
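
A minimal sketch of enabling the token provider in broker.conf, assuming a symmetric secret key stored at /opt/pulsar/my-secret.key (the key path and the placeholder token below are illustrative, not values from this deployment):

  # Enable authentication and register the token provider
  authenticationEnabled=true
  authenticationProviders=org.apache.pulsar.broker.authentication.AuthenticationProviderToken
  # Symmetric secret key used to validate tokens (for asymmetric keys, set tokenPublicKey instead)
  tokenSecretKey=file:///opt/pulsar/my-secret.key
  # Credentials the broker itself uses when it acts as a client (for example, for replication)
  brokerClientAuthenticationPlugin=org.apache.pulsar.client.impl.auth.AuthenticationToken
  brokerClientAuthenticationParameters=token:<broker-admin-token>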

SASL Authentication Provider

BookKeeper Client

  ### --- BookKeeper Client --- ###
  # Metadata service URI that bookkeeper uses for loading the corresponding metadata driver
  # and resolving its metadata service location.
  # This value can be fetched using the `bookkeeper shell whatisinstanceid` command in the BookKeeper cluster.
  # For example: zk+hierarchical://localhost:2181/ledgers
  # The metadata service uri list can also be semicolon separated values like below:
  # zk+hierarchical://zk1:2181;zk2:2181;zk3:2181/ledgers
  bookkeeperMetadataServiceUri=
  # Authentication plugin to use when connecting to bookies
  bookkeeperClientAuthenticationPlugin=
  # BookKeeper auth plugin implementation-specific parameter name and values
  bookkeeperClientAuthenticationParametersName=
  bookkeeperClientAuthenticationParameters=
  # Timeout for BK add / read operations
  bookkeeperClientTimeoutInSeconds=30
  # Speculative reads are initiated if a read request doesn't complete within a certain time
  # A value of 0 disables speculative reads
  bookkeeperClientSpeculativeReadTimeoutInMillis=0
  # Number of channels per bookie
  bookkeeperNumberOfChannelsPerBookie=16
  # Use the older BookKeeper wire protocol with bookies
  bookkeeperUseV2WireProtocol=true
  # Enable bookies health check. Bookies that have more than the configured number of failures within
  # the interval will be quarantined for some time. During this period, new ledgers won't be created
  # on these bookies
  bookkeeperClientHealthCheckEnabled=true
  bookkeeperClientHealthCheckIntervalSeconds=60
  bookkeeperClientHealthCheckErrorThresholdPerInterval=5
  bookkeeperClientHealthCheckQuarantineTimeInSeconds=1800
  # Bookie quarantine ratio, to avoid all clients quarantining high-pressure bookie servers at the same time
  bookkeeperClientQuarantineRatio=1.0
  # Specify options for the GetBookieInfo check. These settings can be useful
  # to help ensure the list of bookies is up to date on the brokers.
  bookkeeperGetBookieInfoIntervalSeconds=86400
  bookkeeperGetBookieInfoRetryIntervalSeconds=60
  # Enable the rack-aware bookie selection policy. BK will choose bookies from different racks when
  # forming a new bookie ensemble
  # This parameter is related to ensemblePlacementPolicy in conf/bookkeeper.conf; if enabled, ensemblePlacementPolicy
  # should be set to org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicy
  bookkeeperClientRackawarePolicyEnabled=true
  # Enable the region-aware bookie selection policy. BK will choose bookies from
  # different regions and racks when forming a new bookie ensemble
  # If enabled, the value of bookkeeperClientRackawarePolicyEnabled is ignored
  # This parameter is related to ensemblePlacementPolicy in conf/bookkeeper.conf; if enabled, ensemblePlacementPolicy
  # should be set to org.apache.bookkeeper.client.RegionAwareEnsemblePlacementPolicy
  bookkeeperClientRegionawarePolicyEnabled=false
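  # Illustrative snippet only (uses the placement class named above): the matching line in conf/bookkeeper.conf
  # for the rack-aware case would look like:
  # ensemblePlacementPolicy=org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicy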
  # Minimum number of racks per write quorum. The BK rack-aware bookie selection policy will try to
  # get bookies from at least 'bookkeeperClientMinNumRacksPerWriteQuorum' racks for a write quorum.
  bookkeeperClientMinNumRacksPerWriteQuorum=2
  # Enforce the rack-aware bookie selection policy to pick bookies from 'bookkeeperClientMinNumRacksPerWriteQuorum'
  # racks for a write quorum.
  # If BK can't find such bookies, it will throw BKNotEnoughBookiesException instead of picking a random one.
  bookkeeperClientEnforceMinNumRacksPerWriteQuorum=false
  # Enable/disable reordering the read sequence when reading entries.
  bookkeeperClientReorderReadSequenceEnabled=false
  # Enable bookie isolation by specifying a list of bookie groups to choose from. Any bookie
  # outside the specified groups will not be used by the broker
  bookkeeperClientIsolationGroups=
  # Enable bookie secondary-isolation groups if bookkeeperClientIsolationGroups doesn't
  # have enough bookies available.
  bookkeeperClientSecondaryIsolationGroups=
  # Minimum number of bookies that should be available as part of bookkeeperClientIsolationGroups;
  # otherwise the broker will include bookkeeperClientSecondaryIsolationGroups bookies in the isolated list.
  bookkeeperClientMinAvailableBookiesInIsolationGroups=
  # Enable/disable having read operations for a ledger be sticky to a single bookie.
  # If this flag is enabled, the client will use one single bookie (by preference) to read
  # all entries for a ledger.
  #
  # Disable sticky reads until {@link https://github.com/apache/bookkeeper/issues/1970} is fixed
  bookkeeperEnableStickyReads=false
  # Set the client security provider factory class name.
  # Default: org.apache.bookkeeper.tls.TLSContextFactory
  bookkeeperTLSProviderFactoryClass=org.apache.bookkeeper.tls.TLSContextFactory
  # Enable TLS authentication with bookies
  bookkeeperTLSClientAuthentication=false
  # Supported types: PEM, JKS, PKCS12. Default value: PEM
  bookkeeperTLSKeyFileType=PEM
  # Supported types: PEM, JKS, PKCS12. Default value: PEM
  bookkeeperTLSTrustCertTypes=PEM
  # Path to the file containing the keystore password, if the client keystore is password protected.
  bookkeeperTLSKeyStorePasswordPath=
  # Path to the file containing the truststore password, if the client truststore is password protected.
  bookkeeperTLSTrustStorePasswordPath=
  # Path for the TLS private key file
  bookkeeperTLSKeyFilePath=
  # Path for the TLS certificate file
  bookkeeperTLSCertificateFilePath=
  # Path for the trusted TLS certificate file
  bookkeeperTLSTrustCertsFilePath=
  # Enable/disable disk-weight-based placement. Default is false
  bookkeeperDiskWeightBasedPlacementEnabled=false
  # Set the interval to check the need for sending an explicit LAC
  # A value of '0' disables sending any explicit LACs. Default is 0.
  bookkeeperExplicitLacIntervalInMills=0
  # Expose bookkeeper client managed ledger stats to prometheus. Default is false
  bookkeeperClientExposeStatsToPrometheus=true

Managed Ledger

  # Number of bookies to use when creating a ledger
  managedLedgerDefaultEnsembleSize=2
  # Number of copies to store for each message
  managedLedgerDefaultWriteQuorum=2
  # Number of guaranteed copies (acks to wait for before the write is complete)
  managedLedgerDefaultAckQuorum=2
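  # Illustrative example only (values are assumptions): on a larger bookie cluster, a common pattern is to spread
  # each ledger across 3 bookies, write every entry to all 3, and wait for 2 acks before the write completes:
  # managedLedgerDefaultEnsembleSize=3
  # managedLedgerDefaultWriteQuorum=3
  # managedLedgerDefaultAckQuorum=2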
  # How frequently to flush the cursor positions that were accumulated due to rate limiting (seconds).
  # Default is 60 seconds
  managedLedgerCursorPositionFlushSeconds=60
  # Default type of checksum to use when writing to BookKeeper. Default is "CRC32C"
  # Other possible options are "CRC32", "MAC" or "DUMMY" (no checksum).
  managedLedgerDigestType=CRC32C
  # Number of threads to be used for managed ledger tasks dispatching
  managedLedgerNumWorkerThreads=8
  # Number of threads to be used for managed ledger scheduled tasks
  managedLedgerNumSchedulerThreads=8
  # Amount of memory to use for caching data payloads in managed ledgers. This memory
  # is allocated from JVM direct memory and it's shared across all the topics
  # running in the same broker. By default, it uses 1/5th of the available direct memory
  managedLedgerCacheSizeMB=
  # Whether we should make a copy of the entry payloads when inserting into the cache
  managedLedgerCacheCopyEntries=false
  # Threshold to which to bring down the cache level when eviction is triggered
  managedLedgerCacheEvictionWatermark=0.9
  # Configure the cache eviction frequency for the managed ledger cache (evictions/sec)
  managedLedgerCacheEvictionFrequency=100.0
  # All entries that have stayed in the cache for more than the configured time will be evicted
  managedLedgerCacheEvictionTimeThresholdMillis=1000
  # Configure the threshold (in number of entries) from which a cursor should be considered 'backlogged'
  # and thus should be set as inactive.
  managedLedgerCursorBackloggedThreshold=1000
  # Rate limit the amount of writes per second generated by consumers acking messages
  managedLedgerDefaultMarkDeleteRateLimit=1.0
  # Max number of entries to append to a ledger before triggering a rollover
  # A ledger rollover is triggered on these conditions:
  # * either the max rollover time has been reached
  # * or max entries have been written to the ledger and at least the min rollover time
  #   has passed
  managedLedgerMaxEntriesPerLedger=50000
  # Minimum time between ledger rollovers for a topic
  managedLedgerMinLedgerRolloverTimeMinutes=10
  # Maximum time before forcing a ledger rollover for a topic
  managedLedgerMaxLedgerRolloverTimeMinutes=240
  # Maximum ledger size before triggering a rollover for a topic (MB)
  managedLedgerMaxSizePerLedgerMbytes=2048
  # Delay between a ledger being successfully offloaded to long term storage
  # and the ledger being deleted from bookkeeper (default is 4 hours)
  managedLedgerOffloadDeletionLagMs=14400000
  # The number of bytes before triggering automatic offload to long term storage
  # (default is -1, which means disabled)
  managedLedgerOffloadAutoTriggerSizeThresholdBytes=-1
  # Max number of entries to append to a cursor ledger
  managedLedgerCursorMaxEntriesPerLedger=50000
  # Max time before triggering a rollover on a cursor ledger
  managedLedgerCursorRolloverTimeInSeconds=14400
  # Max number of "acknowledgment holes" that are going to be persistently stored.
  # When acknowledging out of order, a consumer will leave holes that are supposed
  # to be quickly filled by acking all the messages. The information about which
  # messages are acknowledged is persisted by compressing it into "ranges" of messages
  # that were acknowledged. After the max number of ranges is reached, the information
  # will only be tracked in memory and messages will be redelivered in case of
  # crashes.
  managedLedgerMaxUnackedRangesToPersist=10000
  # Max number of "acknowledgment holes" that can be stored in Zookeeper. If the number of unacked message ranges is higher
  # than this limit, the broker will persist unacked ranges into bookkeeper to avoid additional data overhead in
  # zookeeper.
  managedLedgerMaxUnackedRangesToPersistInZooKeeper=1000
  # Skip reading non-recoverable/unreadable data ledgers in a managed ledger's list. This helps when data ledgers get
  # corrupted in bookkeeper and the managed cursor is stuck at that ledger.
  autoSkipNonRecoverableData=false
  # Whether to recover cursors lazily when trying to recover a managed ledger backing a persistent topic.
  # It can improve the write availability of topics.
  # The caveat is that when the recovered ledger is ready for writes, we are not sure whether all old consumers'
  # last mark-delete positions can be recovered or not.
  lazyCursorRecovery=false
  # Operation timeout while updating managed-ledger metadata.
  managedLedgerMetadataOperationsTimeoutSeconds=60
  # Read entries timeout when the broker tries to read messages from bookkeeper.
  managedLedgerReadEntryTimeoutSeconds=0
  # Add entry timeout when the broker tries to publish a message to bookkeeper (0 to disable it).
  managedLedgerAddEntryTimeoutSeconds=0
  # Managed ledger prometheus stats latency rollover seconds (default: 60s)
  managedLedgerPrometheusStatsLatencyRolloverSeconds=60
  # Whether to trace managed ledger task execution time
  managedLedgerTraceTaskExecution=true
  # New entries check delay for the cursor under the managed ledger.
  # If there are no new messages in the topic, the cursor will try to check again after the delay time.
  # For consumption-latency-sensitive scenarios, this can be set to a smaller value or to 0.
  # Of course, using a smaller value may degrade consumption throughput. Default is 10ms.
  managedLedgerNewEntriesCheckDelayInMillis=10