Chart repository: https://github.com/sentry-kubernetes/charts
Artifact Hub listing: https://artifacthub.io/packages/helm/sentry/sentry
helm repo add sentry https://sentry-kubernetes.github.io/charts
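With the repository added, the chart's default configuration can be exported and edited before installing; the file produced here is what the -f flag consumes below:

helm show values sentry/sentry > values.yaml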
helm install sentry sentry/sentry
helm install sentry sentry/sentry -f values.yaml
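Passing -f merges the override file over the chart defaults. A minimal sketch of such a file (every key appears in the values dump that follows; the URL, addresses, and password are placeholders):

user:
  create: true
  email: admin@example.com        # initial admin account
  password: change-me             # replace the chart default "aaaa"
system:
  url: https://sentry.example.com # public URL of the installation
mail:
  backend: smtp                   # chart default is "dummy" (outbound mail disabled)
  host: smtp.example.com
  port: 25
  from: sentry@example.com

The install then echoes the release metadata and the supplied values: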
NAME: sentry
LAST DEPLOYED: Tue Aug 9 10:37:15 2022
NAMESPACE: infrastructure-prod
STATUS: pending-install
REVISION: 1
TEST SUITE: None
USER-SUPPLIED VALUES:
asHook: true
auth:
register: true
clickhouse:
clickhouse:
configmap:
builtin_dictionaries_reload_interval: "3600"
compression:
cases:
- method: zstd
min_part_size: "10000000000"
min_part_size_ratio: "0.01"
enabled: false
default_session_timeout: "60"
disable_internal_dns_cache: "1"
enabled: true
graphite:
config:
- asynchronous_metrics: true
events: true
events_cumulative: true
interval: "60"
metrics: true
root_path: one_min
timeout: "0.1"
enabled: false
keep_alive_timeout: "3"
logger:
count: "10"
level: trace
path: /var/log/clickhouse-server
size: 1000M
stdoutLogsEnabled: false
mark_cache_size: "5368709120"
max_concurrent_queries: "100"
max_connections: "4096"
max_session_timeout: "3600"
mlock_executable: false
profiles:
enabled: false
profile:
- config:
load_balancing: random
max_memory_usage: "10000000000"
use_uncompressed_cache: "0"
name: default
quotas:
enabled: false
quota:
- config:
- duration: "3600"
errors: "0"
execution_time: "0"
queries: "0"
read_rows: "0"
result_rows: "0"
name: default
remote_servers:
enabled: true
internal_replication: true
replica:
backup:
enabled: false
compression: true
user: default
umask: "022"
uncompressed_cache_size: "8589934592"
users:
enabled: false
user:
- config:
networks:
- ::/0
password: ""
profile: default
quota: default
name: default
zookeeper_servers:
config:
- hostTemplate: '{{ .Release.Name }}-zookeeper-clickhouse'
index: clickhouse
port: "2181"
enabled: true
operation_timeout_ms: "10000"
session_timeout_ms: "30000"
http_port: "8123"
image: yandex/clickhouse-server
imagePullPolicy: IfNotPresent
imageVersion: 20.8.19.4
ingress:
enabled: false
init:
image: busybox
imagePullPolicy: IfNotPresent
imageVersion: 1.31.0
resources: {}
interserver_http_port: "9009"
livenessProbe:
enabled: true
failureThreshold: "3"
initialDelaySeconds: "30"
periodSeconds: "30"
successThreshold: "1"
timeoutSeconds: "5"
metrics:
enabled: false
image:
port: 9116
pullPolicy: IfNotPresent
registry: docker.io
repository: f1yegor/clickhouse-exporter
tag: latest
podAnnotations:
prometheus.io/port: "9116"
prometheus.io/scrape: "true"
podLabels: {}
prometheusRule:
additionalLabels: {}
enabled: false
namespace: ""
rules: []
service:
annotations: {}
labels: {}
type: ClusterIP
serviceMonitor:
enabled: false
selector:
prometheus: kube-prometheus
path: /var/lib/clickhouse
persistentVolumeClaim:
dataPersistentVolume:
accessModes:
- ReadWriteOnce
enabled: true
storage: 30Gi
enabled: true
logsPersistentVolume:
accessModes:
- ReadWriteOnce
enabled: false
storage: 50Gi
podManagementPolicy: Parallel
podSecurityContext: {}
readinessProbe:
enabled: true
failureThreshold: "3"
initialDelaySeconds: "30"
periodSeconds: "30"
successThreshold: "1"
timeoutSeconds: "5"
replicas: "3"
resources: {}
securityContext: {}
tcp_port: "9000"
updateStrategy: RollingUpdate
clusterDomain: cluster.local
enabled: true
global: {}
serviceAccount:
annotations: {}
automountServiceAccountToken: true
enabled: false
name: clickhouse
tabix:
enabled: false
image: spoonest/clickhouse-tabix-web-client
imagePullPolicy: IfNotPresent
imageVersion: stable
ingress:
enabled: false
livenessProbe:
enabled: true
failureThreshold: "3"
initialDelaySeconds: "30"
periodSeconds: "30"
successThreshold: "1"
timeoutSeconds: "5"
podAnnotations: null
podLabels: null
readinessProbe:
enabled: true
failureThreshold: "3"
initialDelaySeconds: "30"
periodSeconds: "30"
successThreshold: "1"
timeoutSeconds: "5"
replicas: "1"
resources: {}
security:
password: admin
user: admin
updateStrategy:
maxSurge: 3
maxUnavailable: 1
type: RollingUpdate
timezone: UTC
config:
configYml: {}
relay: |
# No YAML relay config given
sentryConfPy: |
# No Python Extension Config Given
snubaSettingsPy: |
# No Python Extension Config Given
externalClickhouse:
database: default
host: clickhouse
httpPort: 8123
password: ""
singleNode: true
tcpPort: 9000
username: default
externalKafka:
port: 9092
externalPostgresql:
database: sentry
port: 5432
username: postgres
externalRedis:
port: 6379
filestore:
backend: filesystem
filesystem:
path: /var/lib/sentry/files
persistence:
accessMode: ReadWriteOnce
enabled: true
existingClaim: ""
persistentWorkers: false
size: 10Gi
gcs: {}
s3: {}
geodata:
mountPath: ""
path: ""
volumeName: ""
github: {}
google: {}
hooks:
activeDeadlineSeconds: 100
dbCheck:
affinity: {}
env: []
image:
imagePullSecrets: []
nodeSelector: {}
podAnnotations: {}
resources:
limits:
memory: 64Mi
requests:
cpu: 100m
memory: 64Mi
securityContext: {}
dbInit:
affinity: {}
env: []
nodeSelector: {}
podAnnotations: {}
resources:
limits:
memory: 2048Mi
requests:
cpu: 300m
memory: 2048Mi
sidecars: []
volumes: []
enabled: true
removeOnSuccess: true
shareProcessNamespace: false
snubaInit:
affinity: {}
nodeSelector: {}
podAnnotations: {}
resources:
limits:
cpu: 2000m
memory: 1Gi
requests:
cpu: 700m
memory: 1Gi
snubaMigrate: {}
images:
relay:
imagePullSecrets: []
sentry:
imagePullSecrets: []
snuba:
imagePullSecrets: []
symbolicator:
imagePullSecrets: []
tag: 0.5.1
ingress:
alb:
httpRedirect: false
enabled: false
regexPathStyle: nginx
kafka:
advertisedListeners: []
affinity: {}
allowEveryoneIfNoAclFound: true
allowPlaintextListener: true
args: []
auth:
clientProtocol: plaintext
externalClientProtocol: ""
interBrokerProtocol: plaintext
sasl:
interBrokerMechanism: plain
jaas:
clientPasswords: []
clientUsers:
- user
existingSecret: ""
interBrokerPassword: ""
interBrokerUser: admin
zookeeperPassword: ""
zookeeperUser: ""
mechanisms: plain,scram-sha-256,scram-sha-512
tls:
autoGenerated: false
endpointIdentificationAlgorithm: https
existingSecret: ""
existingSecrets: []
jksKeystoreSAN: ""
jksTruststore: ""
jksTruststoreSecret: ""
password: ""
pemChainIncluded: false
type: jks
zookeeper:
tls:
enabled: false
existingSecret: ""
existingSecretKeystoreKey: zookeeper.keystore.jks
existingSecretTruststoreKey: zookeeper.truststore.jks
passwordsSecret: ""
passwordsSecretKeystoreKey: keystore-password
passwordsSecretTruststoreKey: truststore-password
type: jks
verifyHostname: true
authorizerClassName: ""
autoCreateTopicsEnable: true
clusterDomain: cluster.local
command:
- /scripts/setup.sh
common:
exampleValue: common-chart
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
commonAnnotations: {}
commonLabels: {}
config: ""
containerPorts:
client: 9092
external: 9094
internal: 9093
containerSecurityContext:
enabled: true
runAsNonRoot: true
runAsUser: 1001
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
defaultReplicationFactor: 3
deleteTopicEnable: false
diagnosticMode:
args:
- infinity
command:
- sleep
enabled: false
enabled: true
existingConfigmap: ""
existingLog4jConfigMap: ""
externalAccess:
autoDiscovery:
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/kubectl
tag: 1.24.0-debian-10-r2
resources:
limits: {}
requests: {}
enabled: false
service:
annotations: {}
domain: ""
extraPorts: []
loadBalancerAnnotations: []
loadBalancerIPs: []
loadBalancerNames: []
loadBalancerSourceRanges: []
nodePorts: []
ports:
external: 9094
type: LoadBalancer
useHostIPs: false
usePodIPs: false
externalZookeeper:
servers: []
extraDeploy: []
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
extraVolumeMounts: []
extraVolumes: []
fullnameOverride: ""
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
heapOpts: -Xmx1024m -Xms1024m
hostAliases: []
hostIPC: false
hostNetwork: false
image:
debug: false
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/kafka
tag: 3.1.1-debian-10-r6
initContainers: []
interBrokerListenerName: INTERNAL
kubeVersion: ""
lifecycleHooks: {}
listenerSecurityProtocolMap: ""
listeners: []
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
log4j: ""
logFlushIntervalMessages: _10000
logFlushIntervalMs: 1000
logPersistence:
accessModes:
- ReadWriteOnce
annotations: {}
enabled: false
existingClaim: ""
mountPath: /opt/bitnami/kafka/logs
selector: {}
size: 8Gi
storageClass: ""
logRetentionBytes: _1073741824
logRetentionCheckIntervalMs: 300000
logRetentionHours: 168
logSegmentBytes: _1073741824
logsDirs: /bitnami/kafka/data
maxMessageBytes: "50000000"
metrics:
jmx:
config: |-
jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi
lowercaseOutputName: true
lowercaseOutputLabelNames: true
ssl: false
{{- if .Values.metrics.jmx.whitelistObjectNames }}
whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"]
{{- end }}
containerPorts:
metrics: 5556
containerSecurityContext:
enabled: true
runAsNonRoot: true
runAsUser: 1001
enabled: false
existingConfigmap: ""
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/jmx-exporter
tag: 0.16.1-debian-10-r303
resources:
limits: {}
requests: {}
service:
annotations:
prometheus.io/path: /
prometheus.io/port: '{{ .Values.metrics.jmx.service.ports.metrics }}'
prometheus.io/scrape: "true"
clusterIP: ""
ports:
metrics: 5556
sessionAffinity: None
whitelistObjectNames:
- kafka.controller:*
- kafka.server:*
- java.lang:*
- kafka.network:*
- kafka.log:*
kafka:
affinity: {}
args: []
certificatesSecret: ""
command: []
containerPorts:
metrics: 9308
containerSecurityContext:
enabled: true
runAsNonRoot: true
runAsUser: 1001
enabled: false
extraFlags: {}
extraVolumeMounts: []
extraVolumes: []
hostAliases: []
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/kafka-exporter
tag: 1.4.2-debian-10-r240
initContainers: []
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podSecurityContext:
enabled: true
fsGroup: 1001
resources:
limits: {}
requests: {}
schedulerName: ""
service:
annotations:
prometheus.io/path: /metrics
prometheus.io/port: '{{ .Values.metrics.kafka.service.ports.metrics }}'
prometheus.io/scrape: "true"
clusterIP: ""
ports:
metrics: 9308
sessionAffinity: None
serviceAccount:
automountServiceAccountToken: true
create: true
name: ""
sidecars: []
tlsCaCert: ca-file
tlsCaSecret: ""
tlsCert: cert-file
tlsKey: key-file
tolerations: []
serviceMonitor:
enabled: false
honorLabels: false
interval: ""
jobLabel: ""
labels: {}
metricRelabelings: []
namespace: ""
relabelings: []
scrapeTimeout: ""
selector: {}
minBrokerId: 0
nameOverride: ""
networkPolicy:
allowExternal: true
egressRules:
customRules: []
enabled: false
explicitNamespacesSelector: {}
externalAccess:
from: []
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
numIoThreads: 8
numNetworkThreads: 3
numPartitions: 1
numRecoveryThreadsPerDataDir: 1
offsetsTopicReplicationFactor: 3
pdb:
create: false
maxUnavailable: 1
minAvailable: ""
persistence:
accessModes:
- ReadWriteOnce
annotations: {}
enabled: true
existingClaim: ""
mountPath: /bitnami/kafka
selector: {}
size: 8Gi
storageClass: ""
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podManagementPolicy: Parallel
podSecurityContext:
enabled: true
fsGroup: 1001
priorityClassName: ""
provisioning:
args: []
auth:
tls:
caCert: ca.crt
cert: tls.crt
certificatesSecret: ""
key: tls.key
keyPassword: ""
keyPasswordSecretKey: key-password
keystore: keystore.jks
keystorePassword: ""
keystorePasswordSecretKey: keystore-password
passwordsSecret: ""
truststore: truststore.jks
truststorePassword: ""
truststorePasswordSecretKey: truststore-password
type: jks
command: []
containerSecurityContext:
enabled: true
runAsNonRoot: true
runAsUser: 1001
enabled: false
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
extraProvisioningCommands: []
extraVolumeMounts: []
extraVolumes: []
initContainers: []
numPartitions: 1
parallel: 1
podAnnotations: {}
podLabels: {}
podSecurityContext:
enabled: true
fsGroup: 1001
postScript: ""
preScript: ""
replicationFactor: 1
resources:
limits: {}
requests: {}
schedulerName: ""
sidecars: []
topics: []
waitForKafka: true
rbac:
create: false
readinessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
replicaCount: 3
resources:
limits: {}
requests: {}
schedulerName: ""
service:
annotations: {}
clusterIP: ""
externalTrafficPolicy: Cluster
extraPorts: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
nodePorts:
client: ""
external: ""
ports:
client: 9092
external: 9094
internal: 9093
sessionAffinity: None
type: ClusterIP
serviceAccount:
annotations: {}
automountServiceAccountToken: true
create: true
name: ""
sidecars: []
socketReceiveBufferBytes: 102400
socketRequestMaxBytes: "50000000"
socketSendBufferBytes: 102400
startupProbe:
enabled: false
failureThreshold: 15
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
superUsers: User:admin
terminationGracePeriodSeconds: ""
tolerations: []
topologySpreadConstraints: {}
transactionStateLogMinIsr: 3
transactionStateLogReplicationFactor: 3
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
volumePermissions:
containerSecurityContext:
runAsUser: 0
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/bitnami-shell
tag: 10-debian-10-r431
resources:
limits: {}
requests: {}
zookeeper:
affinity: {}
args: []
auth:
clientPassword: ""
clientUser: ""
enabled: false
existingSecret: ""
serverPasswords: ""
serverUsers: ""
autopurge:
purgeInterval: 0
snapRetainCount: 3
clusterDomain: cluster.local
command:
- /scripts/setup.sh
common:
exampleValue: common-chart
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
commonAnnotations: {}
commonLabels: {}
configuration: ""
containerPorts:
client: 2181
election: 3888
follower: 2888
tls: 3181
containerSecurityContext:
enabled: true
runAsNonRoot: true
runAsUser: 1001
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
dataLogDir: ""
diagnosticMode:
args:
- infinity
command:
- sleep
enabled: false
enabled: true
existingConfigmap: ""
extraDeploy: []
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
extraVolumeMounts: []
extraVolumes: []
fourlwCommandsWhitelist: srvr, mntr, ruok
fullnameOverride: ""
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
heapSize: 1024
hostAliases: []
image:
debug: false
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/zookeeper
tag: 3.8.0-debian-10-r63
initContainers: []
initLimit: 10
jvmFlags: ""
kubeVersion: ""
lifecycleHooks: {}
listenOnAllIPs: false
livenessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
probeCommandTimeout: 2
successThreshold: 1
timeoutSeconds: 5
logLevel: ERROR
maxClientCnxns: 60
maxSessionTimeout: 40000
metrics:
containerPort: 9141
enabled: false
prometheusRule:
additionalLabels: {}
enabled: false
namespace: ""
rules: []
service:
annotations:
prometheus.io/path: /metrics
prometheus.io/port: '{{ .Values.metrics.service.port }}'
prometheus.io/scrape: "true"
port: 9141
type: ClusterIP
serviceMonitor:
additionalLabels: {}
enabled: false
honorLabels: false
interval: ""
jobLabel: ""
metricRelabelings: []
namespace: ""
relabelings: []
scrapeTimeout: ""
selector: {}
minServerId: 1
nameOverride: ""
namespaceOverride: ""
networkPolicy:
allowExternal: true
enabled: false
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
pdb:
create: false
maxUnavailable: 1
minAvailable: ""
persistence:
accessModes:
- ReadWriteOnce
annotations: {}
dataLogDir:
existingClaim: ""
selector: {}
size: 8Gi
enabled: true
existingClaim: ""
selector: {}
size: 8Gi
storageClass: ""
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podManagementPolicy: Parallel
podSecurityContext:
enabled: true
fsGroup: 1001
preAllocSize: 65536
priorityClassName: ""
readinessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
probeCommandTimeout: 2
successThreshold: 1
timeoutSeconds: 5
replicaCount: 1
resources:
limits: {}
requests:
cpu: 250m
memory: 256Mi
schedulerName: ""
service:
annotations: {}
clusterIP: ""
disableBaseClientPort: false
externalTrafficPolicy: Cluster
extraPorts: []
headless:
annotations: {}
publishNotReadyAddresses: true
loadBalancerIP: ""
loadBalancerSourceRanges: []
nodePorts:
client: ""
tls: ""
ports:
client: 2181
election: 3888
follower: 2888
tls: 3181
sessionAffinity: None
type: ClusterIP
serviceAccount:
annotations: {}
automountServiceAccountToken: true
create: false
name: ""
sidecars: []
snapCount: 100000
startupProbe:
enabled: false
failureThreshold: 15
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
syncLimit: 5
tickTime: 2000
tls:
client:
auth: none
autoGenerated: false
enabled: false
existingSecret: ""
existingSecretKeystoreKey: ""
existingSecretTruststoreKey: ""
keystorePassword: ""
keystorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks
passwordsSecretKeystoreKey: ""
passwordsSecretName: ""
passwordsSecretTruststoreKey: ""
truststorePassword: ""
truststorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks
quorum:
auth: none
autoGenerated: false
enabled: false
existingSecret: ""
existingSecretKeystoreKey: ""
existingSecretTruststoreKey: ""
keystorePassword: ""
keystorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks
passwordsSecretKeystoreKey: ""
passwordsSecretName: ""
passwordsSecretTruststoreKey: ""
truststorePassword: ""
truststorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks
resources:
limits: {}
requests: {}
tolerations: []
topologySpreadConstraints: {}
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
volumePermissions:
containerSecurityContext:
runAsUser: 0
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/bitnami-shell
tag: 10-debian-10-r430
resources:
limits: {}
requests: {}
zookeeperChrootPath: ""
zookeeperConnectionTimeoutMs: 6000
mail:
backend: dummy
from: ""
host: ""
password: ""
port: 25
useSsl: false
useTls: false
username: ""
memcached:
affinity: {}
architecture: standalone
args:
- memcached
- -u memcached
- -p 11211
- -v
- -m $(MEMCACHED_MEMORY_LIMIT)
- -I $(MEMCACHED_MAX_ITEM_SIZE)
auth:
enabled: false
password: ""
username: ""
autoscaling:
enabled: false
maxReplicas: 6
minReplicas: 3
targetCPU: 50
targetMemory: 50
clusterDomain: cluster.local
command: []
common:
exampleValue: common-chart
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
commonAnnotations: {}
commonLabels: {}
containerPorts:
memcached: 11211
containerSecurityContext:
enabled: true
runAsNonRoot: true
runAsUser: 1001
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
diagnosticMode:
args:
- infinity
command:
- sleep
enabled: false
extraDeploy: []
extraEnvVars: []
extraEnvVarsCM: sentry-memcached
extraEnvVarsSecret: ""
extraVolumeMounts: []
extraVolumes: []
fullnameOverride: ""
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
hostAliases: []
image:
debug: false
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/memcached
tag: 1.6.15-debian-11-r10
initContainers: []
kubeVersion: ""
lifecycleHooks: {}
livenessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
maxItemSize: "26214400"
memoryLimit: "2048"
metrics:
containerPorts:
metrics: 9150
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/memcached-exporter
tag: 0.10.0-debian-11-r2
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 15
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
podAnnotations:
prometheus.io/port: '{{ .Values.metrics.containerPorts.metrics }}'
prometheus.io/scrape: "true"
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
resources:
limits: {}
requests: {}
service:
annotations:
prometheus.io/port: '{{ .Values.metrics.service.ports.metrics }}'
prometheus.io/scrape: "true"
clusterIP: ""
ports:
metrics: 9150
sessionAffinity: None
serviceMonitor:
enabled: false
honorLabels: false
interval: ""
jobLabel: ""
labels: {}
metricRelabelings: []
namespace: ""
relabelings: []
scrapeTimeout: ""
selector: {}
startupProbe:
enabled: false
failureThreshold: 15
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
nameOverride: ""
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
pdb:
create: false
maxUnavailable: 1
minAvailable: ""
persistence:
accessModes:
- ReadWriteOnce
annotations: {}
enabled: false
selector: {}
size: 8Gi
storageClass: ""
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podManagementPolicy: Parallel
podSecurityContext:
enabled: true
fsGroup: 1001
priorityClassName: ""
readinessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
replicaCount: 1
resources:
limits: {}
requests:
cpu: 250m
memory: 256Mi
schedulerName: ""
service:
annotations: {}
clusterIP: ""
externalTrafficPolicy: Cluster
extraPorts: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
nodePorts:
memcached: ""
ports:
memcached: 11211
sessionAffinity: None
sessionAffinityConfig: {}
type: ClusterIP
serviceAccount:
annotations: {}
automountServiceAccountToken: true
create: false
name: ""
sidecars: []
startupProbe:
enabled: false
failureThreshold: 15
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
terminationGracePeriodSeconds: ""
tolerations: []
topologySpreadConstraints: []
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
volumePermissions:
containerSecurityContext:
runAsUser: 0
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r10
resources:
limits: {}
requests: {}
metrics:
affinity: {}
enabled: false
image:
pullPolicy: IfNotPresent
repository: prom/statsd-exporter
tag: v0.17.0
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 2
nodeSelector: {}
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 2
resources: {}
securityContext: {}
service:
labels: {}
type: ClusterIP
serviceMonitor:
additionalLabels: {}
enabled: false
namespace: ""
namespaceSelector: {}
scrapeInterval: 30s
tolerations: []
nginx:
affinity: {}
args: []
autoscaling:
enabled: false
maxReplicas: ""
minReplicas: ""
targetCPU: ""
targetMemory: ""
cloneStaticSiteFromGit:
branch: ""
enabled: false
extraEnvVars: []
extraVolumeMounts: []
gitClone:
args: []
command: []
gitSync:
args: []
command: []
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/git
tag: 2.36.1-debian-11-r2
interval: 60
repository: ""
clusterDomain: cluster.local
command: []
common:
exampleValue: common-chart
global:
imagePullSecrets: []
imageRegistry: ""
commonAnnotations: {}
commonLabels: {}
containerPort: 8080
containerPorts:
http: 8080
https: ""
containerSecurityContext:
enabled: false
runAsNonRoot: true
runAsUser: 1001
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
diagnosticMode:
args:
- infinity
command:
- sleep
enabled: false
enabled: true
existingServerBlockConfigmap: '{{ template "sentry.fullname" . }}'
extraDeploy: []
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
extraVolumeMounts: []
extraVolumes: []
fullnameOverride: ""
global:
imagePullSecrets: []
imageRegistry: ""
healthIngress:
annotations: {}
enabled: false
extraHosts: []
extraPaths: []
extraRules: []
extraTls: []
hostname: example.local
ingressClassName: ""
path: /
pathType: ImplementationSpecific
secrets: []
selfSigned: false
tls: false
hostAliases: []
hostIPC: false
hostNetwork: false
image:
debug: false
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/nginx
tag: 1.22.0-debian-11-r3
ingress:
annotations: {}
apiVersion: ""
enabled: false
extraHosts: []
extraPaths: []
extraRules: []
extraTls: []
hostname: nginx.local
ingressClassName: ""
path: /
pathType: ImplementationSpecific
secrets: []
selfSigned: false
tls: false
initContainers: []
kubeVersion: ""
lifecycleHooks: {}
livenessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
metrics:
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/nginx-exporter
tag: 0.10.0-debian-11-r2
podAnnotations: {}
port: ""
prometheusRule:
additionalLabels: {}
enabled: false
namespace: ""
rules: []
resources:
limits: {}
requests: {}
securityContext:
enabled: false
runAsUser: 1001
service:
annotations:
prometheus.io/port: '{{ .Values.metrics.service.port }}'
prometheus.io/scrape: "true"
port: 9113
serviceMonitor:
enabled: false
honorLabels: false
interval: ""
jobLabel: ""
labels: {}
metricRelabelings: []
namespace: ""
relabelings: []
scrapeTimeout: ""
selector: {}
nameOverride: ""
namespaceOverride: ""
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
pdb:
create: false
maxUnavailable: 0
minAvailable: 1
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podSecurityContext:
enabled: false
fsGroup: 1001
sysctls: []
priorityClassName: ""
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
replicaCount: 1
resources:
limits: {}
requests: {}
schedulerName: ""
serverBlock: ""
service:
annotations: {}
clusterIP: ""
externalTrafficPolicy: Cluster
extraPorts: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
nodePorts:
http: ""
https: ""
ports:
http: 80
https: 443
sessionAffinity: None
sessionAffinityConfig: {}
targetPort:
http: http
https: https
type: ClusterIP
serviceAccount:
annotations: {}
automountServiceAccountToken: false
create: false
name: ""
sidecarSingleProcessNamespace: false
sidecars: []
startupProbe:
enabled: false
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
staticSiteConfigmap: ""
staticSitePVC: ""
terminationGracePeriodSeconds: ""
tolerations: []
topologySpreadConstraints: []
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
postgresql:
audit:
clientMinMessages: error
logConnections: false
logDisconnections: false
logHostname: false
logLinePrefix: ""
logTimezone: ""
pgAuditLog: ""
pgAuditLogCatalog: "off"
common:
exampleValue: common-chart
global:
imagePullSecrets: []
imageRegistry: ""
postgresql:
existingSecret: ""
postgresqlDatabase: ""
postgresqlPassword: ""
postgresqlUsername: ""
replicationPassword: ""
servicePort: ""
storageClass: ""
commonAnnotations: {}
commonLabels: {}
configurationConfigMap: ""
containerPorts:
postgresql: 5432
containerSecurityContext:
enabled: true
runAsUser: 1001
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
diagnosticMode:
args:
- infinity
command:
- sleep
enabled: false
enabled: true
existingSecret: ""
extendedConfConfigMap: ""
extraDeploy: []
extraEnv: []
extraEnvVarsCM: ""
fullnameOverride: ""
global:
imagePullSecrets: []
imageRegistry: ""
postgresql:
existingSecret: ""
postgresqlDatabase: ""
postgresqlPassword: ""
postgresqlUsername: ""
replicationPassword: ""
servicePort: ""
storageClass: ""
image:
debug: false
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/postgresql
tag: 11.14.0-debian-10-r28
initdbPassword: ""
initdbScripts: {}
initdbScriptsConfigMap: ""
initdbScriptsSecret: ""
initdbUser: ""
ldap:
baseDN: ""
bind_password: ""
bindDN: ""
enabled: false
port: ""
prefix: ""
scheme: ""
search_attr: ""
search_filter: ""
server: ""
suffix: ""
tls: ""
url: ""
lifecycleHooks: {}
livenessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
metrics:
customMetrics: {}
enabled: false
extraEnvVars: []
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/postgres-exporter
tag: 0.10.0-debian-10-r172
livenessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
prometheusRule:
additionalLabels: {}
enabled: false
namespace: ""
rules: []
readinessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
resources: {}
securityContext:
enabled: false
runAsUser: 1001
service:
annotations:
prometheus.io/port: "9187"
prometheus.io/scrape: "true"
loadBalancerIP: ""
type: ClusterIP
serviceMonitor:
additionalLabels: {}
enabled: false
interval: ""
metricRelabelings: []
namespace: ""
relabelings: []
scrapeTimeout: ""
nameOverride: sentry-postgresql
networkPolicy:
allowExternal: true
enabled: false
explicitNamespacesSelector: {}
persistence:
accessModes:
- ReadWriteOnce
annotations: {}
enabled: true
existingClaim: ""
mountPath: /bitnami/postgresql
selector: {}
size: 8Gi
snapshotName: ""
storageClass: ""
subPath: ""
pgHbaConfiguration: ""
postgresqlConfiguration: {}
postgresqlDataDir: /bitnami/postgresql/data
postgresqlDatabase: sentry
postgresqlDbUserConnectionLimit: ""
postgresqlExtendedConf: {}
postgresqlInitdbArgs: ""
postgresqlInitdbWalDir: ""
postgresqlMaxConnections: ""
postgresqlPassword: ""
postgresqlPghbaRemoveFilters: ""
postgresqlPostgresConnectionLimit: ""
postgresqlPostgresPassword: ""
postgresqlSharedPreloadLibraries: pgaudit
postgresqlStatementTimeout: ""
postgresqlTcpKeepalivesCount: ""
postgresqlTcpKeepalivesIdle: ""
postgresqlTcpKeepalivesInterval: ""
postgresqlUsername: postgres
primary:
affinity: {}
annotations: {}
extraInitContainers: []
extraPodSpec: {}
extraVolumeMounts: []
extraVolumes: []
labels: {}
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
priorityClassName: ""
service:
clusterIP: ""
nodePort: ""
type: ""
sidecars: []
tolerations: []
primaryAsStandBy:
enabled: false
primaryHost: ""
primaryPort: ""
psp:
create: false
rbac:
create: false
readReplicas:
affinity: {}
annotations: {}
extraInitContainers: []
extraPodSpec: {}
extraVolumeMounts: []
extraVolumes: []
labels: {}
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
persistence:
enabled: true
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
priorityClassName: ""
resources: {}
service:
clusterIP: ""
nodePort: ""
type: ""
sidecars: []
tolerations: []
topologySpreadConstraints: []
readinessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
replication:
applicationName: sentry
enabled: false
numSynchronousReplicas: 1
password: repl_password
readReplicas: 2
singleService: true
synchronousCommit: "on"
uniqueServices: false
user: repl_user
resources:
requests:
cpu: 250m
memory: 256Mi
schedulerName: ""
securityContext:
enabled: true
fsGroup: 1001
service:
annotations: {}
clusterIP: ""
externalTrafficPolicy: Cluster
loadBalancerIP: ""
loadBalancerSourceRanges: []
nodePort: ""
port: 5432
type: ClusterIP
serviceAccount:
autoMount: false
enabled: false
name: ""
shmVolume:
chmod:
enabled: true
enabled: true
sizeLimit: ""
startupProbe:
enabled: false
failureThreshold: 10
initialDelaySeconds: 30
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 5
terminationGracePeriodSeconds: ""
tls:
autoGenerated: false
certCAFilename: ""
certFilename: ""
certKeyFilename: ""
certificatesSecret: ""
crlFilename: ""
enabled: false
preferServerCiphers: true
updateStrategy:
type: RollingUpdate
usePasswordFile: false
volumePermissions:
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/bitnami-shell
tag: 10-debian-10-r305
securityContext:
runAsUser: 0
prefix: null
rabbitmq:
advancedConfiguration: ""
affinity: {}
args: []
auth:
erlangCookie: pHgpy3Q6adTskzAT6bLHCFqFTF7lMxhA
existingErlangSecret: ""
existingPasswordSecret: ""
password: guest
tls:
autoGenerated: false
caCertificate: ""
enabled: false
existingSecret: ""
existingSecretFullChain: false
failIfNoPeerCert: true
serverCertificate: ""
serverKey: ""
sslOptionsVerify: verify_peer
username: guest
clusterDomain: cluster.local
clustering:
addressType: hostname
enabled: true
forceBoot: true
partitionHandling: autoheal
rebalance: true
command: []
common:
exampleValue: common-chart
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
commonAnnotations: {}
communityPlugins: ""
configuration: |-
## Username and password
##
default_user = {{ .Values.auth.username }}
default_pass = CHANGEME
{{- if .Values.clustering.enabled }}
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.{{ .Values.clusterDomain }}
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = {{ .Values.clustering.partitionHandling }}
{{- end }}
{{- if .Values.loadDefinition.enabled }}
load_definitions = {{ .Values.loadDefinition.file }}
{{- end }}
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
{{ tpl .Values.extraConfiguration . }}
{{- if .Values.auth.tls.enabled }}
ssl_options.verify = {{ .Values.auth.tls.sslOptionsVerify }}
listeners.ssl.default = {{ .Values.service.tlsPort }}
ssl_options.fail_if_no_peer_cert = {{ .Values.auth.tls.failIfNoPeerCert }}
ssl_options.cacertfile = /opt/bitnami/rabbitmq/certs/ca_certificate.pem
ssl_options.certfile = /opt/bitnami/rabbitmq/certs/server_certificate.pem
ssl_options.keyfile = /opt/bitnami/rabbitmq/certs/server_key.pem
{{- end }}
{{- if .Values.ldap.enabled }}
auth_backends.1 = rabbit_auth_backend_ldap
auth_backends.2 = internal
{{- range $index, $server := .Values.ldap.servers }}
auth_ldap.servers.{{ add $index 1 }} = {{ $server }}
{{- end }}
auth_ldap.port = {{ .Values.ldap.port }}
auth_ldap.user_dn_pattern = {{ .Values.ldap.user_dn_pattern }}
{{- if .Values.ldap.tls.enabled }}
auth_ldap.use_ssl = true
{{- end }}
{{- end }}
{{- if .Values.metrics.enabled }}
## Prometheus metrics
##
prometheus.tcp.port = 9419
{{- end }}
{{- if .Values.memoryHighWatermark.enabled }}
## Memory Threshold
##
total_memory_available_override_value = {{ include "rabbitmq.toBytes" .Values.resources.limits.memory }}
vm_memory_high_watermark.{{ .Values.memoryHighWatermark.type }} = {{ .Values.memoryHighWatermark.value }}
{{- end }}
containerSecurityContext: {}
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
diagnosticMode:
args:
- infinity
command:
- sleep
enabled: false
dnsConfig: {}
dnsPolicy: ""
enabled: true
extraConfiguration: |
load_definitions = /app/load_definition.json
extraContainerPorts: []
extraDeploy: []
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
extraPlugins: rabbitmq_auth_backend_ldap
extraSecrets:
load-definition:
load_definition.json: |
{
"users": [
{
"name": "{{ .Values.auth.username }}",
"password": "{{ .Values.auth.password }}",
"tags": "administrator"
}
],
"permissions": [{
"user": "{{ .Values.auth.username }}",
"vhost": "/",
"configure": ".*",
"write": ".*",
"read": ".*"
}],
"policies": [
{
"name": "ha-all",
"pattern": ".*",
"vhost": "/",
"definition": {
"ha-mode": "all",
"ha-sync-mode": "automatic",
"ha-sync-batch-size": 1
}
}
],
"vhosts": [
{
"name": "/"
}
]
}
extraSecretsPrependReleaseName: false
extraVolumeMounts: []
extraVolumes: []
fullnameOverride: ""
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
hostAliases: []
image:
debug: false
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/rabbitmq
tag: 3.9.16-debian-10-r0
ingress:
annotations: {}
enabled: false
extraHosts: []
extraRules: []
extraTls: []
hostname: rabbitmq.local
ingressClassName: ""
path: /
pathType: ImplementationSpecific
secrets: []
selfSigned: false
tls: false
initContainers: []
kubeVersion: ""
ldap:
enabled: false
port: "389"
servers: []
tls:
enabled: false
user_dn_pattern: cn=${username},dc=example,dc=org
livenessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 120
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 20
loadDefinition:
enabled: true
existingSecret: load-definition
file: /app/load_definition.json
logs: '-'
maxAvailableSchedulers: ""
memoryHighWatermark:
enabled: false
type: relative
value: 0.4
metrics:
enabled: false
plugins: rabbitmq_prometheus
podAnnotations:
prometheus.io/port: '{{ .Values.service.metricsPort }}'
prometheus.io/scrape: "true"
prometheusRule:
additionalLabels: {}
enabled: false
namespace: ""
rules: []
serviceMonitor:
additionalLabels: {}
enabled: false
honorLabels: false
interval: 30s
metricRelabelings: []
namespace: ""
path: ""
podTargetLabels: {}
relabelings: []
relabellings: []
scrapeTimeout: ""
targetLabels: {}
nameOverride: ""
networkPolicy:
additionalRules: []
allowExternal: true
enabled: false
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
onlineSchedulers: ""
pdb:
create: true
maxUnavailable: ""
minAvailable: 1
persistence:
accessMode: ReadWriteOnce
annotations: {}
enabled: true
existingClaim: ""
mountPath: /bitnami/rabbitmq/mnesia
selector: {}
size: 8Gi
storageClass: ""
subPath: ""
volumes: []
plugins: rabbitmq_management rabbitmq_peer_discovery_k8s
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podManagementPolicy: OrderedReady
podSecurityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
priorityClassName: ""
rbac:
create: true
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 20
replicaCount: 3
resources:
limits: {}
requests: {}
schedulerName: ""
service:
annotations: {}
annotationsHeadless: {}
distNodePort: ""
distPort: 25672
distPortEnabled: true
distPortName: dist
epmdNodePort: ""
epmdPortEnabled: true
epmdPortName: epmd
externalIPs: []
externalTrafficPolicy: Cluster
extraPorts: []
labels: {}
loadBalancerIP: ""
loadBalancerSourceRanges: []
managerNodePort: ""
managerPort: 15672
managerPortEnabled: true
managerPortName: http-stats
metricsNodePort: ""
metricsPort: 9419
metricsPortName: metrics
nodePort: ""
port: 5672
portEnabled: true
portName: amqp
tlsNodePort: ""
tlsPort: 5671
tlsPortName: amqp-ssl
type: ClusterIP
serviceAccount:
automountServiceAccountToken: true
create: true
name: ""
sidecars: []
statefulsetLabels: {}
terminationGracePeriodSeconds: 120
tolerations: []
topologySpreadConstraints: []
ulimitNofiles: "65536"
updateStrategyType: RollingUpdate
volumePermissions:
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/bitnami-shell
tag: 10-debian-10-r408
resources:
limits: {}
requests: {}
redis:
architecture: replication
auth:
enabled: false
existingSecret: ""
existingSecretPasswordKey: ""
password: ""
sentinel: false
usePasswordFiles: false
clusterDomain: cluster.local
common:
exampleValue: common-chart
global:
imagePullSecrets: []
imageRegistry: ""
redis:
password: ""
storageClass: ""
commonAnnotations: {}
commonConfiguration: |-
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
commonLabels: {}
diagnosticMode:
args:
- infinity
command:
- sleep
enabled: false
enabled: true
existingConfigmap: ""
extraDeploy: []
fullnameOverride: ""
global:
imagePullSecrets: []
imageRegistry: ""
redis:
password: ""
storageClass: ""
image:
debug: false
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/redis
tag: 6.2.7-debian-11-r3
kubeVersion: ""
master:
affinity: {}
args: []
command: []
configuration: ""
containerPorts:
redis: 6379
containerSecurityContext:
enabled: true
runAsUser: 1001
count: 1
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
disableCommands:
- FLUSHDB
- FLUSHALL
dnsConfig: {}
dnsPolicy: ""
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
extraFlags: []
extraVolumeMounts: []
extraVolumes: []
hostAliases: []
initContainers: []
kind: StatefulSet
lifecycleHooks: {}
livenessProbe:
enabled: true
failureThreshold: 5
initialDelaySeconds: 20
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
persistence:
accessModes:
- ReadWriteOnce
annotations: {}
dataSource: {}
enabled: true
existingClaim: ""
medium: ""
path: /data
selector: {}
size: 8Gi
sizeLimit: ""
storageClass: ""
subPath: ""
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podSecurityContext:
enabled: true
fsGroup: 1001
preExecCmds: []
priorityClassName: ""
readinessProbe:
enabled: true
failureThreshold: 5
initialDelaySeconds: 20
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 1
resources:
limits: {}
requests: {}
schedulerName: ""
service:
annotations: {}
clusterIP: ""
externalTrafficPolicy: Cluster
extraPorts: []
internalTrafficPolicy: Cluster
loadBalancerIP: ""
loadBalancerSourceRanges: []
nodePorts:
redis: ""
ports:
redis: 6379
sessionAffinity: None
sessionAffinityConfig: {}
type: ClusterIP
shareProcessNamespace: false
sidecars: []
startupProbe:
enabled: false
failureThreshold: 5
initialDelaySeconds: 20
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5
terminationGracePeriodSeconds: 30
tolerations: []
topologySpreadConstraints: []
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
metrics:
command: []
containerSecurityContext:
enabled: true
runAsUser: 1001
enabled: false
extraArgs: {}
extraEnvVars: []
extraVolumeMounts: []
extraVolumes: []
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/redis-exporter
tag: 1.40.0-debian-11-r0
podAnnotations:
prometheus.io/port: "9121"
prometheus.io/scrape: "true"
podLabels: {}
prometheusRule:
additionalLabels: {}
enabled: false
namespace: ""
rules: []
redisTargetHost: localhost
resources:
limits: {}
requests: {}
service:
annotations: {}
externalTrafficPolicy: Cluster
extraPorts: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
port: 9121
type: ClusterIP
serviceMonitor:
additionalLabels: {}
enabled: false
honorLabels: false
interval: 30s
metricRelabelings: []
namespace: ""
relabellings: []
scrapeTimeout: ""
nameOverride: sentry-redis
networkPolicy:
allowExternal: true
enabled: false
extraEgress: []
extraIngress: []
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
pdb:
create: false
maxUnavailable: ""
minAvailable: 1
podSecurityPolicy:
create: false
enabled: false
rbac:
create: false
rules: []
replica:
affinity: {}
args: []
autoscaling:
enabled: false
maxReplicas: 11
minReplicas: 1
targetCPU: ""
targetMemory: ""
command: []
configuration: ""
containerPorts:
redis: 6379
containerSecurityContext:
enabled: true
runAsUser: 1001
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
disableCommands:
- FLUSHDB
- FLUSHALL
dnsConfig: {}
dnsPolicy: ""
externalMaster:
enabled: false
host: ""
port: 6379
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
extraFlags: []
extraVolumeMounts: []
extraVolumes: []
hostAliases: []
initContainers: []
lifecycleHooks: {}
livenessProbe:
enabled: true
failureThreshold: 5
initialDelaySeconds: 20
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
persistence:
accessModes:
- ReadWriteOnce
annotations: {}
dataSource: {}
enabled: true
existingClaim: ""
medium: ""
path: /data
selector: {}
size: 8Gi
sizeLimit: ""
storageClass: ""
subPath: ""
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podManagementPolicy: ""
podSecurityContext:
enabled: true
fsGroup: 1001
preExecCmds: []
priorityClassName: ""
readinessProbe:
enabled: true
failureThreshold: 5
initialDelaySeconds: 20
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 1
replicaCount: 3
resources:
limits: {}
requests: {}
schedulerName: ""
service:
annotations: {}
clusterIP: ""
externalTrafficPolicy: Cluster
extraPorts: []
internalTrafficPolicy: Cluster
loadBalancerIP: ""
loadBalancerSourceRanges: []
nodePorts:
redis: ""
ports:
redis: 6379
sessionAffinity: None
sessionAffinityConfig: {}
type: ClusterIP
shareProcessNamespace: false
sidecars: []
startupProbe:
enabled: true
failureThreshold: 22
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
terminationGracePeriodSeconds: 30
tolerations: []
topologySpreadConstraints: []
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
secretAnnotations: {}
sentinel:
args: []
automateClusterRecovery: false
command: []
configuration: ""
containerPorts:
sentinel: 26379
containerSecurityContext:
enabled: true
runAsUser: 1001
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
downAfterMilliseconds: 60000
enabled: false
externalMaster:
enabled: false
host: ""
port: 6379
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
extraVolumeMounts: []
extraVolumes: []
failoverTimeout: 18000
getMasterTimeout: 220
image:
debug: false
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/redis-sentinel
tag: 6.2.7-debian-11-r4
lifecycleHooks: {}
livenessProbe:
enabled: true
failureThreshold: 5
initialDelaySeconds: 20
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5
masterSet: mymaster
parallelSyncs: 1
persistence:
accessModes:
- ReadWriteOnce
annotations: {}
dataSource: {}
enabled: false
medium: ""
selector: {}
size: 100Mi
storageClass: ""
preExecCmds: []
quorum: 2
readinessProbe:
enabled: true
failureThreshold: 5
initialDelaySeconds: 20
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 1
resources:
limits: {}
requests: {}
service:
annotations: {}
clusterIP: ""
externalTrafficPolicy: Cluster
extraPorts: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
nodePorts:
redis: ""
sentinel: ""
ports:
redis: 6379
sentinel: 26379
sessionAffinity: None
sessionAffinityConfig: {}
type: ClusterIP
startupProbe:
enabled: true
failureThreshold: 22
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
terminationGracePeriodSeconds: 30
serviceAccount:
annotations: {}
automountServiceAccountToken: true
create: true
name: ""
sysctl:
command: []
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r3
mountHostSys: false
resources:
limits: {}
requests: {}
tls:
authClients: true
autoGenerated: false
certCAFilename: ""
certFilename: ""
certKeyFilename: ""
certificatesSecret: ""
dhParamsFilename: ""
enabled: false
existingSecret: ""
useExternalDNS:
additionalAnnotations: {}
annotationKey: external-dns.alpha.kubernetes.io/
enabled: false
suffix: ""
usePassword: false
volumePermissions:
containerSecurityContext:
runAsUser: 0
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r3
resources:
limits: {}
requests: {}
relay:
affinity: {}
autoscaling:
enabled: false
maxReplicas: 5
minReplicas: 2
targetCPUUtilizationPercentage: 50
env: []
mode: managed
nodeSelector: {}
probeFailureThreshold: 5
probeInitialDelaySeconds: 10
probePeriodSeconds: 10
probeSuccessThreshold: 1
probeTimeoutSeconds: 2
replicas: 1
resources: {}
securityContext: {}
service:
annotations: {}
sidecars: []
volumes: []
revisionHistoryLimit: 10
sentry:
cleanup:
activeDeadlineSeconds: 100
concurrencyPolicy: Allow
days: 90
enabled: true
failedJobsHistoryLimit: 5
schedule: 0 0 * * *
serviceAccount: {}
sidecars: []
successfulJobsHistoryLimit: 5
volumes: []
cron:
affinity: {}
env: []
nodeSelector: {}
replicas: 1
resources: {}
sidecars: []
volumes: []
features:
orgSubdomains: false
vstsLimitedScopes: true
ingestConsumer:
affinity: {}
autoscaling:
enabled: false
maxReplicas: 3
minReplicas: 1
targetCPUUtilizationPercentage: 50
env: []
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
sidecars: []
volumes: []
postProcessForward:
affinity: {}
env: []
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
sidecars: []
volumes: []
singleOrganization: true
subscriptionConsumerEvents:
affinity: {}
env: []
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
sidecars: []
volumes: []
subscriptionConsumerTransactions:
affinity: {}
env: []
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
sidecars: []
volumes: []
web:
affinity: {}
autoscaling:
enabled: false
maxReplicas: 5
minReplicas: 2
targetCPUUtilizationPercentage: 50
env: []
nodeSelector: {}
probeFailureThreshold: 5
probeInitialDelaySeconds: 10
probePeriodSeconds: 10
probeSuccessThreshold: 1
probeTimeoutSeconds: 2
replicas: 1
resources: {}
securityContext: {}
service:
annotations: {}
sidecars: []
strategyType: RollingUpdate
volumes: []
worker:
affinity: {}
autoscaling:
enabled: false
maxReplicas: 5
minReplicas: 2
targetCPUUtilizationPercentage: 50
env: []
livenessProbe:
enabled: false
failureThreshold: 3
periodSeconds: 60
timeoutSeconds: 10
nodeSelector: {}
replicas: 3
resources: {}
sidecars: []
volumes: []
service:
annotations: {}
externalPort: 9000
name: sentry
type: ClusterIP
serviceAccount:
annotations: {}
automountServiceAccountToken: true
enabled: false
name: sentry
slack: {}
snuba:
api:
affinity: {}
autoscaling:
enabled: false
maxReplicas: 5
minReplicas: 2
targetCPUUtilizationPercentage: 50
command: {}
env: []
liveness:
timeoutSeconds: 2
nodeSelector: {}
probeInitialDelaySeconds: 10
readiness:
timeoutSeconds: 2
replicas: 1
resources: {}
securityContext: {}
service:
annotations: {}
sidecars: []
volumes: []
cleanupErrors:
activeDeadlineSeconds: 100
concurrencyPolicy: Allow
enabled: true
schedule: 0 * * * *
serviceAccount: {}
sidecars: []
successfulJobsHistoryLimit: 5
volumes: []
cleanupTransactions:
activeDeadlineSeconds: 100
concurrencyPolicy: Allow
enabled: true
failedJobsHistoryLimit: 5
schedule: 0 * * * *
serviceAccount: {}
sidecars: []
successfulJobsHistoryLimit: 5
volumes: []
consumer:
affinity: {}
autoOffsetReset: earliest
env: []
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
dbInitJob:
env: []
migrateJob:
env: []
outcomesConsumer:
affinity: {}
autoOffsetReset: earliest
env: []
maxBatchSize: "3"
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
replacer:
affinity: {}
autoOffsetReset: earliest
env: []
maxBatchSize: "3"
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
sessionsConsumer:
affinity: {}
autoOffsetReset: earliest
env: []
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
subscriptionConsumerEvents:
affinity: {}
autoOffsetReset: earliest
env: []
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
subscriptionConsumerTransactions:
affinity: {}
autoOffsetReset: earliest
env: []
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
transactionsConsumer:
affinity: {}
autoOffsetReset: earliest
env: []
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
sourcemaps:
enabled: false
symbolicator:
api:
affinity: {}
autoscaling:
enabled: false
maxReplicas: 5
minReplicas: 2
targetCPUUtilizationPercentage: 50
config: |-
# See: https://getsentry.github.io/symbolicator/#configuration
cache_dir: "/data"
bind: "0.0.0.0:3021"
logging:
level: "warn"
metrics:
statsd: null
prefix: "symbolicator"
sentry_dsn: null
connect_to_reserved_ips: true
# caches:
# downloaded:
# max_unused_for: 1w
# retry_misses_after: 5m
# retry_malformed_after: 5m
# derived:
# max_unused_for: 1w
# retry_misses_after: 5m
# retry_malformed_after: 5m
# diagnostics:
# retention: 1w
env: []
nodeSelector: {}
probeInitialDelaySeconds: 10
replicas: 1
resources: {}
securityContext: {}
cleanup:
enabled: false
enabled: false
system:
adminEmail: ""
public: false
url: ""
user:
create: true
email: admin@sentry.local
password: aaaa
zookeeper:
affinity: {}
args: []
auth:
clientPassword: ""
clientUser: ""
enabled: false
existingSecret: ""
serverPasswords: ""
serverUsers: ""
autopurge:
purgeInterval: 0
snapRetainCount: 3
clusterDomain: cluster.local
command:
- /scripts/setup.sh
common:
exampleValue: common-chart
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
commonAnnotations: {}
commonLabels: {}
configuration: ""
containerPorts:
client: 2181
election: 3888
follower: 2888
tls: 3181
containerSecurityContext:
enabled: true
runAsNonRoot: true
runAsUser: 1001
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
dataLogDir: ""
diagnosticMode:
args:
- infinity
command:
- sleep
enabled: false
enabled: true
existingConfigmap: ""
extraDeploy: []
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
extraVolumeMounts: []
extraVolumes: []
fourlwCommandsWhitelist: srvr, mntr, ruok
fullnameOverride: ""
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
heapSize: 1024
hostAliases: []
image:
debug: false
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/zookeeper
tag: 3.8.0-debian-10-r0
initContainers: []
initLimit: 10
jvmFlags: ""
kubeVersion: ""
lifecycleHooks: {}
listenOnAllIPs: false
livenessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
probeCommandTimeout: 2
successThreshold: 1
timeoutSeconds: 5
logLevel: ERROR
maxClientCnxns: 60
maxSessionTimeout: 40000
metrics:
containerPort: 9141
enabled: false
prometheusRule:
additionalLabels: {}
enabled: false
namespace: ""
rules: []
service:
annotations:
prometheus.io/path: /metrics
prometheus.io/port: '{{ .Values.metrics.service.port }}'
prometheus.io/scrape: "true"
port: 9141
type: ClusterIP
serviceMonitor:
additionalLabels: {}
enabled: false
honorLabels: false
interval: ""
jobLabel: ""
metricRelabelings: []
namespace: ""
relabelings: []
scrapeTimeout: ""
selector: {}
minServerId: 1
nameOverride: zookeeper-clickhouse
namespaceOverride: ""
networkPolicy:
allowExternal: true
enabled: false
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
pdb:
create: false
maxUnavailable: 1
minAvailable: ""
persistence:
accessModes:
- ReadWriteOnce
annotations: {}
dataLogDir:
existingClaim: ""
selector: {}
size: 8Gi
enabled: true
existingClaim: ""
selector: {}
size: 8Gi
storageClass: ""
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podManagementPolicy: Parallel
podSecurityContext:
enabled: true
fsGroup: 1001
preAllocSize: 65536
priorityClassName: ""
readinessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
probeCommandTimeout: 2
successThreshold: 1
timeoutSeconds: 5
replicaCount: 3
resources:
limits: {}
requests:
cpu: 250m
memory: 256Mi
schedulerName: ""
service:
annotations: {}
clusterIP: ""
disableBaseClientPort: false
externalTrafficPolicy: Cluster
extraPorts: []
headless:
annotations: {}
publishNotReadyAddresses: true
loadBalancerIP: ""
loadBalancerSourceRanges: []
nodePorts:
client: ""
tls: ""
ports:
client: 2181
election: 3888
follower: 2888
tls: 3181
sessionAffinity: None
type: ClusterIP
serviceAccount:
annotations: {}
automountServiceAccountToken: true
create: false
name: ""
sidecars: []
snapCount: 100000
startupProbe:
enabled: false
failureThreshold: 15
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
syncLimit: 5
tickTime: 2000
tls:
client:
autoGenerated: false
enabled: false
existingSecret: ""
keystorePassword: ""
keystorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks
passwordsSecretName: ""
truststorePassword: ""
truststorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks
quorum:
autoGenerated: false
enabled: false
existingSecret: ""
keystorePassword: ""
keystorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks
passwordsSecretName: ""
truststorePassword: ""
truststorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks
resources:
limits: {}
requests: {}
tolerations: []
topologySpreadConstraints: {}
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
volumePermissions:
containerSecurityContext:
runAsUser: 0
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/bitnami-shell
tag: 10-debian-10-r368
resources:
limits: {}
requests: {}
COMPUTED VALUES:
(identical to the USER-SUPPLIED VALUES above)
externalPostgresql:
database: sentry
port: 5432
username: postgres
externalRedis:
port: 6379
filestore:
backend: filesystem
filesystem:
path: /var/lib/sentry/files
persistence:
accessMode: ReadWriteOnce
enabled: true
existingClaim: ""
persistentWorkers: false
size: 10Gi
gcs: {}
s3: {}
geodata:
mountPath: ""
path: ""
volumeName: ""
github: {}
google: {}
hooks:
activeDeadlineSeconds: 100
dbCheck:
affinity: {}
env: []
image:
imagePullSecrets: []
nodeSelector: {}
podAnnotations: {}
resources:
limits:
memory: 64Mi
requests:
cpu: 100m
memory: 64Mi
securityContext: {}
dbInit:
affinity: {}
env: []
nodeSelector: {}
podAnnotations: {}
resources:
limits:
memory: 2048Mi
requests:
cpu: 300m
memory: 2048Mi
sidecars: []
volumes: []
enabled: true
removeOnSuccess: true
shareProcessNamespace: false
snubaInit:
affinity: {}
nodeSelector: {}
podAnnotations: {}
resources:
limits:
cpu: 2000m
memory: 1Gi
requests:
cpu: 700m
memory: 1Gi
snubaMigrate: {}
images:
relay:
imagePullSecrets: []
sentry:
imagePullSecrets: []
snuba:
imagePullSecrets: []
symbolicator:
imagePullSecrets: []
tag: 0.5.1
ingress:
alb:
httpRedirect: false
enabled: false
regexPathStyle: nginx
kafka:
advertisedListeners: []
affinity: {}
allowEveryoneIfNoAclFound: true
allowPlaintextListener: true
args: []
auth:
clientProtocol: plaintext
externalClientProtocol: ""
interBrokerProtocol: plaintext
sasl:
interBrokerMechanism: plain
jaas:
clientPasswords: []
clientUsers:
- user
existingSecret: ""
interBrokerPassword: ""
interBrokerUser: admin
zookeeperPassword: ""
zookeeperUser: ""
mechanisms: plain,scram-sha-256,scram-sha-512
tls:
autoGenerated: false
endpointIdentificationAlgorithm: https
existingSecret: ""
existingSecrets: []
jksKeystoreSAN: ""
jksTruststore: ""
jksTruststoreSecret: ""
password: ""
pemChainIncluded: false
type: jks
zookeeper:
tls:
enabled: false
existingSecret: ""
existingSecretKeystoreKey: zookeeper.keystore.jks
existingSecretTruststoreKey: zookeeper.truststore.jks
passwordsSecret: ""
passwordsSecretKeystoreKey: keystore-password
passwordsSecretTruststoreKey: truststore-password
type: jks
verifyHostname: true
authorizerClassName: ""
autoCreateTopicsEnable: true
clusterDomain: cluster.local
command:
- /scripts/setup.sh
common:
exampleValue: common-chart
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
commonAnnotations: {}
commonLabels: {}
config: ""
containerPorts:
client: 9092
external: 9094
internal: 9093
containerSecurityContext:
enabled: true
runAsNonRoot: true
runAsUser: 1001
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
defaultReplicationFactor: 3
deleteTopicEnable: false
diagnosticMode:
args:
- infinity
command:
- sleep
enabled: false
enabled: true
existingConfigmap: ""
existingLog4jConfigMap: ""
externalAccess:
autoDiscovery:
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/kubectl
tag: 1.24.0-debian-10-r2
resources:
limits: {}
requests: {}
enabled: false
service:
annotations: {}
domain: ""
extraPorts: []
loadBalancerAnnotations: []
loadBalancerIPs: []
loadBalancerNames: []
loadBalancerSourceRanges: []
nodePorts: []
ports:
external: 9094
type: LoadBalancer
useHostIPs: false
usePodIPs: false
externalZookeeper:
servers: []
extraDeploy: []
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
extraVolumeMounts: []
extraVolumes: []
fullnameOverride: ""
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
heapOpts: -Xmx1024m -Xms1024m
hostAliases: []
hostIPC: false
hostNetwork: false
image:
debug: false
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/kafka
tag: 3.1.1-debian-10-r6
initContainers: []
interBrokerListenerName: INTERNAL
kubeVersion: ""
lifecycleHooks: {}
listenerSecurityProtocolMap: ""
listeners: []
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
log4j: ""
logFlushIntervalMessages: _10000
logFlushIntervalMs: 1000
logPersistence:
accessModes:
- ReadWriteOnce
annotations: {}
enabled: false
existingClaim: ""
mountPath: /opt/bitnami/kafka/logs
selector: {}
size: 8Gi
storageClass: ""
logRetentionBytes: _1073741824
logRetentionCheckIntervalMs: 300000
logRetentionHours: 168
logSegmentBytes: _1073741824
logsDirs: /bitnami/kafka/data
maxMessageBytes: "50000000"
metrics:
jmx:
config: |-
jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi
lowercaseOutputName: true
lowercaseOutputLabelNames: true
ssl: false
{{- if .Values.metrics.jmx.whitelistObjectNames }}
whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"]
{{- end }}
containerPorts:
metrics: 5556
containerSecurityContext:
enabled: true
runAsNonRoot: true
runAsUser: 1001
enabled: false
existingConfigmap: ""
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/jmx-exporter
tag: 0.16.1-debian-10-r303
resources:
limits: {}
requests: {}
service:
annotations:
prometheus.io/path: /
prometheus.io/port: '{{ .Values.metrics.jmx.service.ports.metrics }}'
prometheus.io/scrape: "true"
clusterIP: ""
ports:
metrics: 5556
sessionAffinity: None
whitelistObjectNames:
- kafka.controller:*
- kafka.server:*
- java.lang:*
- kafka.network:*
- kafka.log:*
kafka:
affinity: {}
args: []
certificatesSecret: ""
command: []
containerPorts:
metrics: 9308
containerSecurityContext:
enabled: true
runAsNonRoot: true
runAsUser: 1001
enabled: false
extraFlags: {}
extraVolumeMounts: []
extraVolumes: []
hostAliases: []
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/kafka-exporter
tag: 1.4.2-debian-10-r240
initContainers: []
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podSecurityContext:
enabled: true
fsGroup: 1001
resources:
limits: {}
requests: {}
schedulerName: ""
service:
annotations:
prometheus.io/path: /metrics
prometheus.io/port: '{{ .Values.metrics.kafka.service.ports.metrics }}'
prometheus.io/scrape: "true"
clusterIP: ""
ports:
metrics: 9308
sessionAffinity: None
serviceAccount:
automountServiceAccountToken: true
create: true
name: ""
sidecars: []
tlsCaCert: ca-file
tlsCaSecret: ""
tlsCert: cert-file
tlsKey: key-file
tolerations: []
serviceMonitor:
enabled: false
honorLabels: false
interval: ""
jobLabel: ""
labels: {}
metricRelabelings: []
namespace: ""
relabelings: []
scrapeTimeout: ""
selector: {}
minBrokerId: 0
nameOverride: ""
networkPolicy:
allowExternal: true
egressRules:
customRules: []
enabled: false
explicitNamespacesSelector: {}
externalAccess:
from: []
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
numIoThreads: 8
numNetworkThreads: 3
numPartitions: 1
numRecoveryThreadsPerDataDir: 1
offsetsTopicReplicationFactor: 3
pdb:
create: false
maxUnavailable: 1
minAvailable: ""
persistence:
accessModes:
- ReadWriteOnce
annotations: {}
enabled: true
existingClaim: ""
mountPath: /bitnami/kafka
selector: {}
size: 8Gi
storageClass: ""
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podManagementPolicy: Parallel
podSecurityContext:
enabled: true
fsGroup: 1001
priorityClassName: ""
provisioning:
args: []
auth:
tls:
caCert: ca.crt
cert: tls.crt
certificatesSecret: ""
key: tls.key
keyPassword: ""
keyPasswordSecretKey: key-password
keystore: keystore.jks
keystorePassword: ""
keystorePasswordSecretKey: keystore-password
passwordsSecret: ""
truststore: truststore.jks
truststorePassword: ""
truststorePasswordSecretKey: truststore-password
type: jks
command: []
containerSecurityContext:
enabled: true
runAsNonRoot: true
runAsUser: 1001
enabled: false
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
extraProvisioningCommands: []
extraVolumeMounts: []
extraVolumes: []
initContainers: []
numPartitions: 1
parallel: 1
podAnnotations: {}
podLabels: {}
podSecurityContext:
enabled: true
fsGroup: 1001
postScript: ""
preScript: ""
replicationFactor: 1
resources:
limits: {}
requests: {}
schedulerName: ""
sidecars: []
topics: []
waitForKafka: true
rbac:
create: false
readinessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
replicaCount: 3
resources:
limits: {}
requests: {}
schedulerName: ""
service:
annotations: {}
clusterIP: ""
externalTrafficPolicy: Cluster
extraPorts: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
nodePorts:
client: ""
external: ""
ports:
client: 9092
external: 9094
internal: 9093
sessionAffinity: None
type: ClusterIP
serviceAccount:
annotations: {}
automountServiceAccountToken: true
create: true
name: ""
sidecars: []
socketReceiveBufferBytes: 102400
socketRequestMaxBytes: "50000000"
socketSendBufferBytes: 102400
startupProbe:
enabled: false
failureThreshold: 15
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
superUsers: User:admin
terminationGracePeriodSeconds: ""
tolerations: []
topologySpreadConstraints: {}
transactionStateLogMinIsr: 3
transactionStateLogReplicationFactor: 3
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
volumePermissions:
containerSecurityContext:
runAsUser: 0
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/bitnami-shell
tag: 10-debian-10-r431
resources:
limits: {}
requests: {}
zookeeper:
affinity: {}
args: []
auth:
clientPassword: ""
clientUser: ""
enabled: false
existingSecret: ""
serverPasswords: ""
serverUsers: ""
autopurge:
purgeInterval: 0
snapRetainCount: 3
clusterDomain: cluster.local
command:
- /scripts/setup.sh
common:
exampleValue: common-chart
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
commonAnnotations: {}
commonLabels: {}
configuration: ""
containerPorts:
client: 2181
election: 3888
follower: 2888
tls: 3181
containerSecurityContext:
enabled: true
runAsNonRoot: true
runAsUser: 1001
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
dataLogDir: ""
diagnosticMode:
args:
- infinity
command:
- sleep
enabled: false
enabled: true
existingConfigmap: ""
extraDeploy: []
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
extraVolumeMounts: []
extraVolumes: []
fourlwCommandsWhitelist: srvr, mntr, ruok
fullnameOverride: ""
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
heapSize: 1024
hostAliases: []
image:
debug: false
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/zookeeper
tag: 3.8.0-debian-10-r63
initContainers: []
initLimit: 10
jvmFlags: ""
kubeVersion: ""
lifecycleHooks: {}
listenOnAllIPs: false
livenessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
probeCommandTimeout: 2
successThreshold: 1
timeoutSeconds: 5
logLevel: ERROR
maxClientCnxns: 60
maxSessionTimeout: 40000
metrics:
containerPort: 9141
enabled: false
prometheusRule:
additionalLabels: {}
enabled: false
namespace: ""
rules: []
service:
annotations:
prometheus.io/path: /metrics
prometheus.io/port: '{{ .Values.metrics.service.port }}'
prometheus.io/scrape: "true"
port: 9141
type: ClusterIP
serviceMonitor:
additionalLabels: {}
enabled: false
honorLabels: false
interval: ""
jobLabel: ""
metricRelabelings: []
namespace: ""
relabelings: []
scrapeTimeout: ""
selector: {}
minServerId: 1
nameOverride: ""
namespaceOverride: ""
networkPolicy:
allowExternal: true
enabled: false
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
pdb:
create: false
maxUnavailable: 1
minAvailable: ""
persistence:
accessModes:
- ReadWriteOnce
annotations: {}
dataLogDir:
existingClaim: ""
selector: {}
size: 8Gi
enabled: true
existingClaim: ""
selector: {}
size: 8Gi
storageClass: ""
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podManagementPolicy: Parallel
podSecurityContext:
enabled: true
fsGroup: 1001
preAllocSize: 65536
priorityClassName: ""
readinessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
probeCommandTimeout: 2
successThreshold: 1
timeoutSeconds: 5
replicaCount: 1
resources:
limits: {}
requests:
cpu: 250m
memory: 256Mi
schedulerName: ""
service:
annotations: {}
clusterIP: ""
disableBaseClientPort: false
externalTrafficPolicy: Cluster
extraPorts: []
headless:
annotations: {}
publishNotReadyAddresses: true
loadBalancerIP: ""
loadBalancerSourceRanges: []
nodePorts:
client: ""
tls: ""
ports:
client: 2181
election: 3888
follower: 2888
tls: 3181
sessionAffinity: None
type: ClusterIP
serviceAccount:
annotations: {}
automountServiceAccountToken: true
create: false
name: ""
sidecars: []
snapCount: 100000
startupProbe:
enabled: false
failureThreshold: 15
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
syncLimit: 5
tickTime: 2000
tls:
client:
auth: none
autoGenerated: false
enabled: false
existingSecret: ""
existingSecretKeystoreKey: ""
existingSecretTruststoreKey: ""
keystorePassword: ""
keystorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks
passwordsSecretKeystoreKey: ""
passwordsSecretName: ""
passwordsSecretTruststoreKey: ""
truststorePassword: ""
truststorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks
quorum:
auth: none
autoGenerated: false
enabled: false
existingSecret: ""
existingSecretKeystoreKey: ""
existingSecretTruststoreKey: ""
keystorePassword: ""
keystorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks
passwordsSecretKeystoreKey: ""
passwordsSecretName: ""
passwordsSecretTruststoreKey: ""
truststorePassword: ""
truststorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks
resources:
limits: {}
requests: {}
tolerations: []
topologySpreadConstraints: {}
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
volumePermissions:
containerSecurityContext:
runAsUser: 0
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/bitnami-shell
tag: 10-debian-10-r430
resources:
limits: {}
requests: {}
zookeeperChrootPath: ""
zookeeperConnectionTimeoutMs: 6000
mail:
backend: dummy
from: ""
host: ""
password: ""
port: 25
useSsl: false
useTls: false
username: ""
memcached:
affinity: {}
architecture: standalone
args:
- memcached
- -u memcached
- -p 11211
- -v
- -m $(MEMCACHED_MEMORY_LIMIT)
- -I $(MEMCACHED_MAX_ITEM_SIZE)
auth:
enabled: false
password: ""
username: ""
autoscaling:
enabled: false
maxReplicas: 6
minReplicas: 3
targetCPU: 50
targetMemory: 50
clusterDomain: cluster.local
command: []
common:
exampleValue: common-chart
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
commonAnnotations: {}
commonLabels: {}
containerPorts:
memcached: 11211
containerSecurityContext:
enabled: true
runAsNonRoot: true
runAsUser: 1001
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
diagnosticMode:
args:
- infinity
command:
- sleep
enabled: false
extraDeploy: []
extraEnvVars: []
extraEnvVarsCM: sentry-memcached
extraEnvVarsSecret: ""
extraVolumeMounts: []
extraVolumes: []
fullnameOverride: ""
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
hostAliases: []
image:
debug: false
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/memcached
tag: 1.6.15-debian-11-r10
initContainers: []
kubeVersion: ""
lifecycleHooks: {}
livenessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
maxItemSize: "26214400"
memoryLimit: "2048"
metrics:
containerPorts:
metrics: 9150
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/memcached-exporter
tag: 0.10.0-debian-11-r2
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 15
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
podAnnotations:
prometheus.io/port: '{{ .Values.metrics.containerPorts.metrics }}'
prometheus.io/scrape: "true"
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
resources:
limits: {}
requests: {}
service:
annotations:
prometheus.io/port: '{{ .Values.metrics.service.ports.metrics }}'
prometheus.io/scrape: "true"
clusterIP: ""
ports:
metrics: 9150
sessionAffinity: None
serviceMonitor:
enabled: false
honorLabels: false
interval: ""
jobLabel: ""
labels: {}
metricRelabelings: []
namespace: ""
relabelings: []
scrapeTimeout: ""
selector: {}
startupProbe:
enabled: false
failureThreshold: 15
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
nameOverride: ""
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
pdb:
create: false
maxUnavailable: 1
minAvailable: ""
persistence:
accessModes:
- ReadWriteOnce
annotations: {}
enabled: false
selector: {}
size: 8Gi
storageClass: ""
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podManagementPolicy: Parallel
podSecurityContext:
enabled: true
fsGroup: 1001
priorityClassName: ""
readinessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
replicaCount: 1
resources:
limits: {}
requests:
cpu: 250m
memory: 256Mi
schedulerName: ""
service:
annotations: {}
clusterIP: ""
externalTrafficPolicy: Cluster
extraPorts: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
nodePorts:
memcached: ""
ports:
memcached: 11211
sessionAffinity: None
sessionAffinityConfig: {}
type: ClusterIP
serviceAccount:
annotations: {}
automountServiceAccountToken: true
create: false
name: ""
sidecars: []
startupProbe:
enabled: false
failureThreshold: 15
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
terminationGracePeriodSeconds: ""
tolerations: []
topologySpreadConstraints: []
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
volumePermissions:
containerSecurityContext:
runAsUser: 0
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r10
resources:
limits: {}
requests: {}
metrics:
affinity: {}
enabled: false
image:
pullPolicy: IfNotPresent
repository: prom/statsd-exporter
tag: v0.17.0
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 2
nodeSelector: {}
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 2
resources: {}
securityContext: {}
service:
labels: {}
type: ClusterIP
serviceMonitor:
additionalLabels: {}
enabled: false
namespace: ""
namespaceSelector: {}
scrapeInterval: 30s
tolerations: []
nginx:
affinity: {}
args: []
autoscaling:
enabled: false
maxReplicas: ""
minReplicas: ""
targetCPU: ""
targetMemory: ""
cloneStaticSiteFromGit:
branch: ""
enabled: false
extraEnvVars: []
extraVolumeMounts: []
gitClone:
args: []
command: []
gitSync:
args: []
command: []
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/git
tag: 2.36.1-debian-11-r2
interval: 60
repository: ""
clusterDomain: cluster.local
command: []
common:
exampleValue: common-chart
global:
imagePullSecrets: []
imageRegistry: ""
commonAnnotations: {}
commonLabels: {}
containerPort: 8080
containerPorts:
http: 8080
https: ""
containerSecurityContext:
enabled: false
runAsNonRoot: true
runAsUser: 1001
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
diagnosticMode:
args:
- infinity
command:
- sleep
enabled: false
enabled: true
existingServerBlockConfigmap: '{{ template "sentry.fullname" . }}'
extraDeploy: []
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
extraVolumeMounts: []
extraVolumes: []
fullnameOverride: ""
global:
imagePullSecrets: []
imageRegistry: ""
healthIngress:
annotations: {}
enabled: false
extraHosts: []
extraPaths: []
extraRules: []
extraTls: []
hostname: example.local
ingressClassName: ""
path: /
pathType: ImplementationSpecific
secrets: []
selfSigned: false
tls: false
hostAliases: []
hostIPC: false
hostNetwork: false
image:
debug: false
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/nginx
tag: 1.22.0-debian-11-r3
ingress:
annotations: {}
apiVersion: ""
enabled: false
extraHosts: []
extraPaths: []
extraRules: []
extraTls: []
hostname: nginx.local
ingressClassName: ""
path: /
pathType: ImplementationSpecific
secrets: []
selfSigned: false
tls: false
initContainers: []
kubeVersion: ""
lifecycleHooks: {}
livenessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
metrics:
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/nginx-exporter
tag: 0.10.0-debian-11-r2
podAnnotations: {}
port: ""
prometheusRule:
additionalLabels: {}
enabled: false
namespace: ""
rules: []
resources:
limits: {}
requests: {}
securityContext:
enabled: false
runAsUser: 1001
service:
annotations:
prometheus.io/port: '{{ .Values.metrics.service.port }}'
prometheus.io/scrape: "true"
port: 9113
serviceMonitor:
enabled: false
honorLabels: false
interval: ""
jobLabel: ""
labels: {}
metricRelabelings: []
namespace: ""
relabelings: []
scrapeTimeout: ""
selector: {}
nameOverride: ""
namespaceOverride: ""
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
pdb:
create: false
maxUnavailable: 0
minAvailable: 1
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podSecurityContext:
enabled: false
fsGroup: 1001
sysctls: []
priorityClassName: ""
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
replicaCount: 1
resources:
limits: {}
requests: {}
schedulerName: ""
serverBlock: ""
service:
annotations: {}
clusterIP: ""
externalTrafficPolicy: Cluster
extraPorts: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
nodePorts:
http: ""
https: ""
ports:
http: 80
https: 443
sessionAffinity: None
sessionAffinityConfig: {}
targetPort:
http: http
https: https
type: ClusterIP
serviceAccount:
annotations: {}
automountServiceAccountToken: false
create: false
name: ""
sidecarSingleProcessNamespace: false
sidecars: []
startupProbe:
enabled: false
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
staticSiteConfigmap: ""
staticSitePVC: ""
terminationGracePeriodSeconds: ""
tolerations: []
topologySpreadConstraints: []
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
postgresql:
audit:
clientMinMessages: error
logConnections: false
logDisconnections: false
logHostname: false
logLinePrefix: ""
logTimezone: ""
pgAuditLog: ""
pgAuditLogCatalog: "off"
common:
exampleValue: common-chart
global:
imagePullSecrets: []
imageRegistry: ""
postgresql:
existingSecret: ""
postgresqlDatabase: ""
postgresqlPassword: ""
postgresqlUsername: ""
replicationPassword: ""
servicePort: ""
storageClass: ""
commonAnnotations: {}
commonLabels: {}
configurationConfigMap: ""
containerPorts:
postgresql: 5432
containerSecurityContext:
enabled: true
runAsUser: 1001
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
diagnosticMode:
args:
- infinity
command:
- sleep
enabled: false
enabled: true
existingSecret: ""
extendedConfConfigMap: ""
extraDeploy: []
extraEnv: []
extraEnvVarsCM: ""
fullnameOverride: ""
global:
imagePullSecrets: []
imageRegistry: ""
postgresql:
existingSecret: ""
postgresqlDatabase: ""
postgresqlPassword: ""
postgresqlUsername: ""
replicationPassword: ""
servicePort: ""
storageClass: ""
image:
debug: false
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/postgresql
tag: 11.14.0-debian-10-r28
initdbPassword: ""
initdbScripts: {}
initdbScriptsConfigMap: ""
initdbScriptsSecret: ""
initdbUser: ""
ldap:
baseDN: ""
bind_password: ""
bindDN: ""
enabled: false
port: ""
prefix: ""
scheme: ""
search_attr: ""
search_filter: ""
server: ""
suffix: ""
tls: ""
url: ""
lifecycleHooks: {}
livenessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
metrics:
customMetrics: {}
enabled: false
extraEnvVars: []
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/postgres-exporter
tag: 0.10.0-debian-10-r172
livenessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
prometheusRule:
additionalLabels: {}
enabled: false
namespace: ""
rules: []
readinessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
resources: {}
securityContext:
enabled: false
runAsUser: 1001
service:
annotations:
prometheus.io/port: "9187"
prometheus.io/scrape: "true"
loadBalancerIP: ""
type: ClusterIP
serviceMonitor:
additionalLabels: {}
enabled: false
interval: ""
metricRelabelings: []
namespace: ""
relabelings: []
scrapeTimeout: ""
nameOverride: sentry-postgresql
networkPolicy:
allowExternal: true
enabled: false
explicitNamespacesSelector: {}
persistence:
accessModes:
- ReadWriteOnce
annotations: {}
enabled: true
existingClaim: ""
mountPath: /bitnami/postgresql
selector: {}
size: 8Gi
snapshotName: ""
storageClass: ""
subPath: ""
pgHbaConfiguration: ""
postgresqlConfiguration: {}
postgresqlDataDir: /bitnami/postgresql/data
postgresqlDatabase: sentry
postgresqlDbUserConnectionLimit: ""
postgresqlExtendedConf: {}
postgresqlInitdbArgs: ""
postgresqlInitdbWalDir: ""
postgresqlMaxConnections: ""
postgresqlPassword: ""
postgresqlPghbaRemoveFilters: ""
postgresqlPostgresConnectionLimit: ""
postgresqlPostgresPassword: ""
postgresqlSharedPreloadLibraries: pgaudit
postgresqlStatementTimeout: ""
postgresqlTcpKeepalivesCount: ""
postgresqlTcpKeepalivesIdle: ""
postgresqlTcpKeepalivesInterval: ""
postgresqlUsername: postgres
primary:
affinity: {}
annotations: {}
extraInitContainers: []
extraPodSpec: {}
extraVolumeMounts: []
extraVolumes: []
labels: {}
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
priorityClassName: ""
service:
clusterIP: ""
nodePort: ""
type: ""
sidecars: []
tolerations: []
primaryAsStandBy:
enabled: false
primaryHost: ""
primaryPort: ""
psp:
create: false
rbac:
create: false
readReplicas:
affinity: {}
annotations: {}
extraInitContainers: []
extraPodSpec: {}
extraVolumeMounts: []
extraVolumes: []
labels: {}
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
persistence:
enabled: true
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
priorityClassName: ""
resources: {}
service:
clusterIP: ""
nodePort: ""
type: ""
sidecars: []
tolerations: []
topologySpreadConstraints: []
readinessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
replication:
applicationName: sentry
enabled: false
numSynchronousReplicas: 1
password: repl_password
readReplicas: 2
singleService: true
synchronousCommit: "on"
uniqueServices: false
user: repl_user
resources:
requests:
cpu: 250m
memory: 256Mi
schedulerName: ""
securityContext:
enabled: true
fsGroup: 1001
service:
annotations: {}
clusterIP: ""
externalTrafficPolicy: Cluster
loadBalancerIP: ""
loadBalancerSourceRanges: []
nodePort: ""
port: 5432
type: ClusterIP
serviceAccount:
autoMount: false
enabled: false
name: ""
shmVolume:
chmod:
enabled: true
enabled: true
sizeLimit: ""
startupProbe:
enabled: false
failureThreshold: 10
initialDelaySeconds: 30
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 5
terminationGracePeriodSeconds: ""
tls:
autoGenerated: false
certCAFilename: ""
certFilename: ""
certKeyFilename: ""
certificatesSecret: ""
crlFilename: ""
enabled: false
preferServerCiphers: true
updateStrategy:
type: RollingUpdate
usePasswordFile: false
volumePermissions:
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/bitnami-shell
tag: 10-debian-10-r305
securityContext:
runAsUser: 0
rabbitmq:
advancedConfiguration: ""
affinity: {}
args: []
auth:
erlangCookie: pHgpy3Q6adTskzAT6bLHCFqFTF7lMxhA
existingErlangSecret: ""
existingPasswordSecret: ""
password: guest
tls:
autoGenerated: false
caCertificate: ""
enabled: false
existingSecret: ""
existingSecretFullChain: false
failIfNoPeerCert: true
serverCertificate: ""
serverKey: ""
sslOptionsVerify: verify_peer
username: guest
clusterDomain: cluster.local
clustering:
addressType: hostname
enabled: true
forceBoot: true
partitionHandling: autoheal
rebalance: true
command: []
common:
exampleValue: common-chart
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
commonAnnotations: {}
communityPlugins: ""
configuration: |-
## Username and password
##
default_user = {{ .Values.auth.username }}
default_pass = CHANGEME
{{- if .Values.clustering.enabled }}
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.{{ .Values.clusterDomain }}
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = {{ .Values.clustering.partitionHandling }}
{{- end }}
{{- if .Values.loadDefinition.enabled }}
load_definitions = {{ .Values.loadDefinition.file }}
{{- end }}
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
{{ tpl .Values.extraConfiguration . }}
{{- if .Values.auth.tls.enabled }}
ssl_options.verify = {{ .Values.auth.tls.sslOptionsVerify }}
listeners.ssl.default = {{ .Values.service.tlsPort }}
ssl_options.fail_if_no_peer_cert = {{ .Values.auth.tls.failIfNoPeerCert }}
ssl_options.cacertfile = /opt/bitnami/rabbitmq/certs/ca_certificate.pem
ssl_options.certfile = /opt/bitnami/rabbitmq/certs/server_certificate.pem
ssl_options.keyfile = /opt/bitnami/rabbitmq/certs/server_key.pem
{{- end }}
{{- if .Values.ldap.enabled }}
auth_backends.1 = rabbit_auth_backend_ldap
auth_backends.2 = internal
{{- range $index, $server := .Values.ldap.servers }}
auth_ldap.servers.{{ add $index 1 }} = {{ $server }}
{{- end }}
auth_ldap.port = {{ .Values.ldap.port }}
auth_ldap.user_dn_pattern = {{ .Values.ldap.user_dn_pattern }}
{{- if .Values.ldap.tls.enabled }}
auth_ldap.use_ssl = true
{{- end }}
{{- end }}
{{- if .Values.metrics.enabled }}
## Prometheus metrics
##
prometheus.tcp.port = 9419
{{- end }}
{{- if .Values.memoryHighWatermark.enabled }}
## Memory Threshold
##
total_memory_available_override_value = {{ include "rabbitmq.toBytes" .Values.resources.limits.memory }}
vm_memory_high_watermark.{{ .Values.memoryHighWatermark.type }} = {{ .Values.memoryHighWatermark.value }}
{{- end }}
containerSecurityContext: {}
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
diagnosticMode:
args:
- infinity
command:
- sleep
enabled: false
dnsConfig: {}
dnsPolicy: ""
enabled: true
extraConfiguration: |
load_definitions = /app/load_definition.json
extraContainerPorts: []
extraDeploy: []
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
extraPlugins: rabbitmq_auth_backend_ldap
extraSecrets:
load-definition:
load_definition.json: |
{
"users": [
{
"name": "{{ .Values.auth.username }}",
"password": "{{ .Values.auth.password }}",
"tags": "administrator"
}
],
"permissions": [{
"user": "{{ .Values.auth.username }}",
"vhost": "/",
"configure": ".*",
"write": ".*",
"read": ".*"
}],
"policies": [
{
"name": "ha-all",
"pattern": ".*",
"vhost": "/",
"definition": {
"ha-mode": "all",
"ha-sync-mode": "automatic",
"ha-sync-batch-size": 1
}
}
],
"vhosts": [
{
"name": "/"
}
]
}
extraSecretsPrependReleaseName: false
extraVolumeMounts: []
extraVolumes: []
fullnameOverride: ""
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
hostAliases: []
image:
debug: false
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/rabbitmq
tag: 3.9.16-debian-10-r0
ingress:
annotations: {}
enabled: false
extraHosts: []
extraRules: []
extraTls: []
hostname: rabbitmq.local
ingressClassName: ""
path: /
pathType: ImplementationSpecific
secrets: []
selfSigned: false
tls: false
initContainers: []
kubeVersion: ""
ldap:
enabled: false
port: "389"
servers: []
tls:
enabled: false
user_dn_pattern: cn=${username},dc=example,dc=org
livenessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 120
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 20
loadDefinition:
enabled: true
existingSecret: load-definition
file: /app/load_definition.json
logs: '-'
maxAvailableSchedulers: ""
memoryHighWatermark:
enabled: false
type: relative
value: 0.4
metrics:
enabled: false
plugins: rabbitmq_prometheus
podAnnotations:
prometheus.io/port: '{{ .Values.service.metricsPort }}'
prometheus.io/scrape: "true"
prometheusRule:
additionalLabels: {}
enabled: false
namespace: ""
rules: []
serviceMonitor:
additionalLabels: {}
enabled: false
honorLabels: false
interval: 30s
metricRelabelings: []
namespace: ""
path: ""
podTargetLabels: {}
relabelings: []
relabellings: []
scrapeTimeout: ""
targetLabels: {}
nameOverride: ""
networkPolicy:
additionalRules: []
allowExternal: true
enabled: false
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
onlineSchedulers: ""
pdb:
create: true
maxUnavailable: ""
minAvailable: 1
persistence:
accessMode: ReadWriteOnce
annotations: {}
enabled: true
existingClaim: ""
mountPath: /bitnami/rabbitmq/mnesia
selector: {}
size: 8Gi
storageClass: ""
subPath: ""
volumes: []
plugins: rabbitmq_management rabbitmq_peer_discovery_k8s
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podManagementPolicy: OrderedReady
podSecurityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
priorityClassName: ""
rbac:
create: true
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 20
replicaCount: 3
resources:
limits: {}
requests: {}
schedulerName: ""
service:
annotations: {}
annotationsHeadless: {}
distNodePort: ""
distPort: 25672
distPortEnabled: true
distPortName: dist
epmdNodePort: ""
epmdPortEnabled: true
epmdPortName: epmd
externalIPs: []
externalTrafficPolicy: Cluster
extraPorts: []
labels: {}
loadBalancerIP: ""
loadBalancerSourceRanges: []
managerNodePort: ""
managerPort: 15672
managerPortEnabled: true
managerPortName: http-stats
metricsNodePort: ""
metricsPort: 9419
metricsPortName: metrics
nodePort: ""
port: 5672
portEnabled: true
portName: amqp
tlsNodePort: ""
tlsPort: 5671
tlsPortName: amqp-ssl
type: ClusterIP
serviceAccount:
automountServiceAccountToken: true
create: true
name: ""
sidecars: []
statefulsetLabels: {}
terminationGracePeriodSeconds: 120
tolerations: []
topologySpreadConstraints: []
ulimitNofiles: "65536"
updateStrategyType: RollingUpdate
volumePermissions:
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/bitnami-shell
tag: 10-debian-10-r408
resources:
limits: {}
requests: {}
redis:
architecture: replication
auth:
enabled: false
existingSecret: ""
existingSecretPasswordKey: ""
password: ""
sentinel: false
usePasswordFiles: false
clusterDomain: cluster.local
common:
exampleValue: common-chart
global:
imagePullSecrets: []
imageRegistry: ""
redis:
password: ""
storageClass: ""
commonAnnotations: {}
commonConfiguration: |-
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
commonLabels: {}
diagnosticMode:
args:
- infinity
command:
- sleep
enabled: false
enabled: true
existingConfigmap: ""
extraDeploy: []
fullnameOverride: ""
global:
imagePullSecrets: []
imageRegistry: ""
redis:
password: ""
storageClass: ""
image:
debug: false
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/redis
tag: 6.2.7-debian-11-r3
kubeVersion: ""
master:
affinity: {}
args: []
command: []
configuration: ""
containerPorts:
redis: 6379
containerSecurityContext:
enabled: true
runAsUser: 1001
count: 1
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
disableCommands:
- FLUSHDB
- FLUSHALL
dnsConfig: {}
dnsPolicy: ""
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
extraFlags: []
extraVolumeMounts: []
extraVolumes: []
hostAliases: []
initContainers: []
kind: StatefulSet
lifecycleHooks: {}
livenessProbe:
enabled: true
failureThreshold: 5
initialDelaySeconds: 20
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
persistence:
accessModes:
- ReadWriteOnce
annotations: {}
dataSource: {}
enabled: true
existingClaim: ""
medium: ""
path: /data
selector: {}
size: 8Gi
sizeLimit: ""
storageClass: ""
subPath: ""
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podSecurityContext:
enabled: true
fsGroup: 1001
preExecCmds: []
priorityClassName: ""
readinessProbe:
enabled: true
failureThreshold: 5
initialDelaySeconds: 20
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 1
resources:
limits: {}
requests: {}
schedulerName: ""
service:
annotations: {}
clusterIP: ""
externalTrafficPolicy: Cluster
extraPorts: []
internalTrafficPolicy: Cluster
loadBalancerIP: ""
loadBalancerSourceRanges: []
nodePorts:
redis: ""
ports:
redis: 6379
sessionAffinity: None
sessionAffinityConfig: {}
type: ClusterIP
shareProcessNamespace: false
sidecars: []
startupProbe:
enabled: false
failureThreshold: 5
initialDelaySeconds: 20
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5
terminationGracePeriodSeconds: 30
tolerations: []
topologySpreadConstraints: []
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
metrics:
command: []
containerSecurityContext:
enabled: true
runAsUser: 1001
enabled: false
extraArgs: {}
extraEnvVars: []
extraVolumeMounts: []
extraVolumes: []
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/redis-exporter
tag: 1.40.0-debian-11-r0
podAnnotations:
prometheus.io/port: "9121"
prometheus.io/scrape: "true"
podLabels: {}
prometheusRule:
additionalLabels: {}
enabled: false
namespace: ""
rules: []
redisTargetHost: localhost
resources:
limits: {}
requests: {}
service:
annotations: {}
externalTrafficPolicy: Cluster
extraPorts: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
port: 9121
type: ClusterIP
serviceMonitor:
additionalLabels: {}
enabled: false
honorLabels: false
interval: 30s
metricRelabelings: []
namespace: ""
relabellings: []
scrapeTimeout: ""
nameOverride: sentry-redis
networkPolicy:
allowExternal: true
enabled: false
extraEgress: []
extraIngress: []
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
pdb:
create: false
maxUnavailable: ""
minAvailable: 1
podSecurityPolicy:
create: false
enabled: false
rbac:
create: false
rules: []
replica:
affinity: {}
args: []
autoscaling:
enabled: false
maxReplicas: 11
minReplicas: 1
targetCPU: ""
targetMemory: ""
command: []
configuration: ""
containerPorts:
redis: 6379
containerSecurityContext:
enabled: true
runAsUser: 1001
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
disableCommands:
- FLUSHDB
- FLUSHALL
dnsConfig: {}
dnsPolicy: ""
externalMaster:
enabled: false
host: ""
port: 6379
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
extraFlags: []
extraVolumeMounts: []
extraVolumes: []
hostAliases: []
initContainers: []
lifecycleHooks: {}
livenessProbe:
enabled: true
failureThreshold: 5
initialDelaySeconds: 20
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
persistence:
accessModes:
- ReadWriteOnce
annotations: {}
dataSource: {}
enabled: true
existingClaim: ""
medium: ""
path: /data
selector: {}
size: 8Gi
sizeLimit: ""
storageClass: ""
subPath: ""
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podManagementPolicy: ""
podSecurityContext:
enabled: true
fsGroup: 1001
preExecCmds: []
priorityClassName: ""
readinessProbe:
enabled: true
failureThreshold: 5
initialDelaySeconds: 20
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 1
replicaCount: 3
resources:
limits: {}
requests: {}
schedulerName: ""
service:
annotations: {}
clusterIP: ""
externalTrafficPolicy: Cluster
extraPorts: []
internalTrafficPolicy: Cluster
loadBalancerIP: ""
loadBalancerSourceRanges: []
nodePorts:
redis: ""
ports:
redis: 6379
sessionAffinity: None
sessionAffinityConfig: {}
type: ClusterIP
shareProcessNamespace: false
sidecars: []
startupProbe:
enabled: true
failureThreshold: 22
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
terminationGracePeriodSeconds: 30
tolerations: []
topologySpreadConstraints: []
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
secretAnnotations: {}
sentinel:
args: []
automateClusterRecovery: false
command: []
configuration: ""
containerPorts:
sentinel: 26379
containerSecurityContext:
enabled: true
runAsUser: 1001
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
downAfterMilliseconds: 60000
enabled: false
externalMaster:
enabled: false
host: ""
port: 6379
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
extraVolumeMounts: []
extraVolumes: []
failoverTimeout: 18000
getMasterTimeout: 220
image:
debug: false
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/redis-sentinel
tag: 6.2.7-debian-11-r4
lifecycleHooks: {}
livenessProbe:
enabled: true
failureThreshold: 5
initialDelaySeconds: 20
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5
masterSet: mymaster
parallelSyncs: 1
persistence:
accessModes:
- ReadWriteOnce
annotations: {}
dataSource: {}
enabled: false
medium: ""
selector: {}
size: 100Mi
storageClass: ""
preExecCmds: []
quorum: 2
readinessProbe:
enabled: true
failureThreshold: 5
initialDelaySeconds: 20
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 1
resources:
limits: {}
requests: {}
service:
annotations: {}
clusterIP: ""
externalTrafficPolicy: Cluster
extraPorts: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
nodePorts:
redis: ""
sentinel: ""
ports:
redis: 6379
sentinel: 26379
sessionAffinity: None
sessionAffinityConfig: {}
type: ClusterIP
startupProbe:
enabled: true
failureThreshold: 22
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
terminationGracePeriodSeconds: 30
serviceAccount:
annotations: {}
automountServiceAccountToken: true
create: true
name: ""
sysctl:
command: []
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r3
mountHostSys: false
resources:
limits: {}
requests: {}
tls:
authClients: true
autoGenerated: false
certCAFilename: ""
certFilename: ""
certKeyFilename: ""
certificatesSecret: ""
dhParamsFilename: ""
enabled: false
existingSecret: ""
useExternalDNS:
additionalAnnotations: {}
annotationKey: external-dns.alpha.kubernetes.io/
enabled: false
suffix: ""
usePassword: false
volumePermissions:
containerSecurityContext:
runAsUser: 0
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/bitnami-shell
tag: 11-debian-11-r3
resources:
limits: {}
requests: {}
relay:
affinity: {}
autoscaling:
enabled: false
maxReplicas: 5
minReplicas: 2
targetCPUUtilizationPercentage: 50
env: []
mode: managed
nodeSelector: {}
probeFailureThreshold: 5
probeInitialDelaySeconds: 10
probePeriodSeconds: 10
probeSuccessThreshold: 1
probeTimeoutSeconds: 2
replicas: 1
resources: {}
securityContext: {}
service:
annotations: {}
sidecars: []
volumes: []
revisionHistoryLimit: 10
sentry:
cleanup:
activeDeadlineSeconds: 100
concurrencyPolicy: Allow
days: 90
enabled: true
failedJobsHistoryLimit: 5
schedule: 0 0 * * *
serviceAccount: {}
sidecars: []
successfulJobsHistoryLimit: 5
volumes: []
cron:
affinity: {}
env: []
nodeSelector: {}
replicas: 1
resources: {}
sidecars: []
volumes: []
features:
orgSubdomains: false
vstsLimitedScopes: true
ingestConsumer:
affinity: {}
autoscaling:
enabled: false
maxReplicas: 3
minReplicas: 1
targetCPUUtilizationPercentage: 50
env: []
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
sidecars: []
volumes: []
postProcessForward:
affinity: {}
env: []
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
sidecars: []
volumes: []
singleOrganization: true
subscriptionConsumerEvents:
affinity: {}
env: []
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
sidecars: []
volumes: []
subscriptionConsumerTransactions:
affinity: {}
env: []
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
sidecars: []
volumes: []
web:
affinity: {}
autoscaling:
enabled: false
maxReplicas: 5
minReplicas: 2
targetCPUUtilizationPercentage: 50
env: []
nodeSelector: {}
probeFailureThreshold: 5
probeInitialDelaySeconds: 10
probePeriodSeconds: 10
probeSuccessThreshold: 1
probeTimeoutSeconds: 2
replicas: 1
resources: {}
securityContext: {}
service:
annotations: {}
sidecars: []
strategyType: RollingUpdate
volumes: []
worker:
affinity: {}
autoscaling:
enabled: false
maxReplicas: 5
minReplicas: 2
targetCPUUtilizationPercentage: 50
env: []
livenessProbe:
enabled: false
failureThreshold: 3
periodSeconds: 60
timeoutSeconds: 10
nodeSelector: {}
replicas: 3
resources: {}
sidecars: []
volumes: []
service:
annotations: {}
externalPort: 9000
name: sentry
type: ClusterIP
serviceAccount:
annotations: {}
automountServiceAccountToken: true
enabled: false
name: sentry
slack: {}
snuba:
api:
affinity: {}
autoscaling:
enabled: false
maxReplicas: 5
minReplicas: 2
targetCPUUtilizationPercentage: 50
command: {}
env: []
liveness:
timeoutSeconds: 2
nodeSelector: {}
probeInitialDelaySeconds: 10
readiness:
timeoutSeconds: 2
replicas: 1
resources: {}
securityContext: {}
service:
annotations: {}
sidecars: []
volumes: []
cleanupErrors:
activeDeadlineSeconds: 100
concurrencyPolicy: Allow
enabled: true
schedule: 0 * * * *
serviceAccount: {}
sidecars: []
successfulJobsHistoryLimit: 5
volumes: []
cleanupTransactions:
activeDeadlineSeconds: 100
concurrencyPolicy: Allow
enabled: true
failedJobsHistoryLimit: 5
schedule: 0 * * * *
serviceAccount: {}
sidecars: []
successfulJobsHistoryLimit: 5
volumes: []
consumer:
affinity: {}
autoOffsetReset: earliest
env: []
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
dbInitJob:
env: []
migrateJob:
env: []
outcomesConsumer:
affinity: {}
autoOffsetReset: earliest
env: []
maxBatchSize: "3"
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
replacer:
affinity: {}
autoOffsetReset: earliest
env: []
maxBatchSize: "3"
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
sessionsConsumer:
affinity: {}
autoOffsetReset: earliest
env: []
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
subscriptionConsumerEvents:
affinity: {}
autoOffsetReset: earliest
env: []
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
subscriptionConsumerTransactions:
affinity: {}
autoOffsetReset: earliest
env: []
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
transactionsConsumer:
affinity: {}
autoOffsetReset: earliest
env: []
nodeSelector: {}
replicas: 1
resources: {}
securityContext: {}
sourcemaps:
enabled: false
symbolicator:
api:
affinity: {}
autoscaling:
enabled: false
maxReplicas: 5
minReplicas: 2
targetCPUUtilizationPercentage: 50
config: |-
# See: https://getsentry.github.io/symbolicator/#configuration
cache_dir: "/data"
bind: "0.0.0.0:3021"
logging:
level: "warn"
metrics:
statsd: null
prefix: "symbolicator"
sentry_dsn: null
connect_to_reserved_ips: true
# caches:
# downloaded:
# max_unused_for: 1w
# retry_misses_after: 5m
# retry_malformed_after: 5m
# derived:
# max_unused_for: 1w
# retry_misses_after: 5m
# retry_malformed_after: 5m
# diagnostics:
# retention: 1w
env: []
nodeSelector: {}
probeInitialDelaySeconds: 10
replicas: 1
resources: {}
securityContext: {}
cleanup:
enabled: false
enabled: false
system:
adminEmail: ""
public: false
url: ""
user:
create: true
email: admin@sentry.local
password: aaaa
zookeeper:
affinity: {}
args: []
auth:
clientPassword: ""
clientUser: ""
enabled: false
existingSecret: ""
serverPasswords: ""
serverUsers: ""
autopurge:
purgeInterval: 0
snapRetainCount: 3
clusterDomain: cluster.local
command:
- /scripts/setup.sh
common:
exampleValue: common-chart
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
commonAnnotations: {}
commonLabels: {}
configuration: ""
containerPorts:
client: 2181
election: 3888
follower: 2888
tls: 3181
containerSecurityContext:
enabled: true
runAsNonRoot: true
runAsUser: 1001
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
dataLogDir: ""
diagnosticMode:
args:
- infinity
command:
- sleep
enabled: false
enabled: true
existingConfigmap: ""
extraDeploy: []
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
extraVolumeMounts: []
extraVolumes: []
fourlwCommandsWhitelist: srvr, mntr, ruok
fullnameOverride: ""
global:
imagePullSecrets: []
imageRegistry: ""
storageClass: ""
heapSize: 1024
hostAliases: []
image:
debug: false
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/zookeeper
tag: 3.8.0-debian-10-r0
initContainers: []
initLimit: 10
jvmFlags: ""
kubeVersion: ""
lifecycleHooks: {}
listenOnAllIPs: false
livenessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
probeCommandTimeout: 2
successThreshold: 1
timeoutSeconds: 5
logLevel: ERROR
maxClientCnxns: 60
maxSessionTimeout: 40000
metrics:
containerPort: 9141
enabled: false
prometheusRule:
additionalLabels: {}
enabled: false
namespace: ""
rules: []
service:
annotations:
prometheus.io/path: /metrics
prometheus.io/port: '{{ .Values.metrics.service.port }}'
prometheus.io/scrape: "true"
port: 9141
type: ClusterIP
serviceMonitor:
additionalLabels: {}
enabled: false
honorLabels: false
interval: ""
jobLabel: ""
metricRelabelings: []
namespace: ""
relabelings: []
scrapeTimeout: ""
selector: {}
minServerId: 1
nameOverride: zookeeper-clickhouse
namespaceOverride: ""
networkPolicy:
allowExternal: true
enabled: false
nodeAffinityPreset:
key: ""
type: ""
values: []
nodeSelector: {}
pdb:
create: false
maxUnavailable: 1
minAvailable: ""
persistence:
accessModes:
- ReadWriteOnce
annotations: {}
dataLogDir:
existingClaim: ""
selector: {}
size: 8Gi
enabled: true
existingClaim: ""
selector: {}
size: 8Gi
storageClass: ""
podAffinityPreset: ""
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podManagementPolicy: Parallel
podSecurityContext:
enabled: true
fsGroup: 1001
preAllocSize: 65536
priorityClassName: ""
readinessProbe:
enabled: true
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
probeCommandTimeout: 2
successThreshold: 1
timeoutSeconds: 5
replicaCount: 3
resources:
limits: {}
requests:
cpu: 250m
memory: 256Mi
schedulerName: ""
service:
annotations: {}
clusterIP: ""
disableBaseClientPort: false
externalTrafficPolicy: Cluster
extraPorts: []
headless:
annotations: {}
publishNotReadyAddresses: true
loadBalancerIP: ""
loadBalancerSourceRanges: []
nodePorts:
client: ""
tls: ""
ports:
client: 2181
election: 3888
follower: 2888
tls: 3181
sessionAffinity: None
type: ClusterIP
serviceAccount:
annotations: {}
automountServiceAccountToken: true
create: false
name: ""
sidecars: []
snapCount: 100000
startupProbe:
enabled: false
failureThreshold: 15
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
syncLimit: 5
tickTime: 2000
tls:
client:
autoGenerated: false
enabled: false
existingSecret: ""
keystorePassword: ""
keystorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks
passwordsSecretName: ""
truststorePassword: ""
truststorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks
quorum:
autoGenerated: false
enabled: false
existingSecret: ""
keystorePassword: ""
keystorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks
passwordsSecretName: ""
truststorePassword: ""
truststorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks
resources:
limits: {}
requests: {}
tolerations: []
topologySpreadConstraints: {}
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
volumePermissions:
containerSecurityContext:
runAsUser: 0
enabled: false
image:
pullPolicy: IfNotPresent
pullSecrets: []
registry: docker.io
repository: bitnami/bitnami-shell
tag: 10-debian-10-r368
resources:
limits: {}
requests: {}
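The COMPUTED VALUES block above is the merge of the chart defaults with the user-supplied overrides. As a sketch (assuming Helm 3 and the release name and namespace shown at the top of this output), the same data can be re-dumped from the installed release at any time:

# Only the user-supplied overrides
helm get values sentry --namespace infrastructure-prod

# The fully merged (computed) values, as printed above
helm get values sentry --all --namespace infrastructure-prod

# The rendered hook resources, as listed in the HOOKS section below
helm get hooks sentry --namespace infrastructure-prod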
HOOKS:
---
# Source: sentry/templates/deployment-relay.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sentry-relay
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
annotations:
meta.helm.sh/release-name: "sentry"
meta.helm.sh/release-namespace: "infrastructure-prod"
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "25"
spec:
selector:
matchLabels:
app: sentry
release: "sentry"
role: relay
replicas: 1
revisionHistoryLimit: 10
template:
metadata:
annotations:
checksum/relay: 8074a66c015a8309dc9bdef7524b89bb223749847663f454012dba4e7ed06cc3
checksum/config.yaml: 1aebd4cf68cf1959ee73319f1d9dcac45bc36c38b57bbacc85a79000c4498083
labels:
app: sentry
release: "sentry"
role: relay
spec:
affinity:
initContainers:
- name: sentry-relay-init
image: "getsentry/relay:22.6.0"
imagePullPolicy: IfNotPresent
args:
- "credentials"
- "generate"
env:
- name: RELAY_PORT
value: '3000'
volumeMounts:
- name: credentials
mountPath: /work/.relay
- name: config
mountPath: /work/.relay/config.yml
subPath: config.yml
readOnly: true
containers:
- name: sentry-relay
image: "getsentry/relay:22.6.0"
imagePullPolicy: IfNotPresent
ports:
- containerPort: 3000
env:
- name: RELAY_PORT
value: '3000'
volumeMounts:
- name: credentials
mountPath: /work/.relay
- name: config
mountPath: /work/.relay/config.yml
subPath: config.yml
readOnly: true
livenessProbe:
failureThreshold: 5
httpGet:
path: /api/relay/healthcheck/live/
port: 3000
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
readinessProbe:
failureThreshold: 5
httpGet:
path: /api/relay/healthcheck/ready/
port: 3000
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
resources:
{}
volumes:
- name: config
configMap:
name: sentry-relay
defaultMode: 0644
- name: credentials
emptyDir: {}
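# How this works: the init container runs "relay credentials generate", writing
# its credentials into the shared "credentials" emptyDir, which the main
# container mounts at /work/.relay. Since the volume is an emptyDir, the
# credentials are regenerated whenever the pod is rescheduled. A quick manual
# probe check (assuming kubectl access to the release namespace):
#   kubectl -n infrastructure-prod port-forward deploy/sentry-relay 3000:3000
#   curl -i http://localhost:3000/api/relay/healthcheck/ready/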
---
# Source: sentry/templates/deployment-sentry-ingest-consumer.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sentry-ingest-consumer
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
annotations:
meta.helm.sh/release-name: "sentry"
meta.helm.sh/release-namespace: "infrastructure-prod"
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "10"
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
app: sentry
release: "sentry"
role: ingest-consumer
replicas: 1
template:
metadata:
annotations:
checksum/configYml: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
checksum/sentryConfPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: de15e82c47dc79d1da18e2f90346ac37d70f7eb8305690821cdec204574e39e8
labels:
app: sentry
release: "sentry"
role: ingest-consumer
spec:
affinity:
containers:
- name: sentry-ingest-consumer
image: "getsentry/sentry:22.6.0"
imagePullPolicy: IfNotPresent
command: ["sentry"]
args:
- "run"
- "ingest-consumer"
- "--all-consumer-types"
env:
- name: SNUBA
value: http://sentry-snuba:1218
- name: C_FORCE_ROOT
value: "true"
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: sentry-sentry-postgresql
key: postgresql-password
volumeMounts:
- mountPath: /etc/sentry
name: config
readOnly: true
- mountPath: /var/lib/sentry/files
name: sentry-data
resources:
{}
volumes:
- name: config
configMap:
name: sentry-sentry
- name: sentry-data
emptyDir: {}
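# --all-consumer-types runs a single process consuming the ingest-events,
# ingest-attachments and ingest-transactions topics together; if Relay produces
# faster than one process can keep up, this Deployment can be scaled out or
# split into one consumer per type.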
---
# Source: sentry/templates/deployment-sentry-post-process-forwarder.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sentry-post-process-forward
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
annotations:
meta.helm.sh/release-name: "sentry"
meta.helm.sh/release-namespace: "infrastructure-prod"
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "10"
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
app: sentry
release: "sentry"
role: sentry-post-process-forward
replicas: 1
template:
metadata:
annotations:
checksum/configYml: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
checksum/sentryConfPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: 2ffe2b34249e688b79045457a2089dbab3d9f4ba191d184958caf7532ea19d60
labels:
app: sentry
release: "sentry"
role: sentry-post-process-forward
spec:
affinity:
containers:
- name: sentry-post-process-forward
image: "getsentry/sentry:22.6.0"
imagePullPolicy: IfNotPresent
command: ["sentry", "run", "post-process-forwarder", "--commit-batch-size", "1"]
env:
- name: SNUBA
value: http://sentry-snuba:1218
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: sentry-sentry-postgresql
key: postgresql-password
volumeMounts:
- mountPath: /etc/sentry
name: config
readOnly: true
- mountPath: /var/lib/sentry/files
name: sentry-data
resources:
{}
volumes:
- name: config
configMap:
name: sentry-sentry
- name: sentry-data
emptyDir: {}
---
# Source: sentry/templates/deployment-sentry-subscription-consumer-events.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sentry-subscription-consumer-events
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
annotations:
meta.helm.sh/release-name: "sentry"
meta.helm.sh/release-namespace: "infrastructure-prod"
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "10"
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
app: sentry
release: "sentry"
role: sentry-subscription-consumer-events
replicas: 1
template:
metadata:
annotations:
checksum/configYml: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
checksum/sentryConfPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: 79e0dfc2231034d0c4b8861a332e086f87f37daff6830dea54d355fe91f66d65
labels:
app: sentry
release: "sentry"
role: sentry-subscription-consumer-events
spec:
affinity:
containers:
- name: sentry-subscription-consumer-events
image: "getsentry/sentry:22.6.0"
imagePullPolicy: IfNotPresent
command: ["sentry"]
args:
- "run"
- "query-subscription-consumer"
- "--topic"
- "events-subscription-results"
- "--commit-batch-size"
- "1"
env:
- name: SNUBA
value: http://sentry-snuba:1218
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: sentry-sentry-postgresql
key: postgresql-password
volumeMounts:
- mountPath: /etc/sentry
name: config
readOnly: true
- mountPath: /var/lib/sentry/files
name: sentry-data
resources:
{}
volumes:
- name: config
configMap:
name: sentry-sentry
- name: sentry-data
emptyDir: {}
---
# Source: sentry/templates/deployment-sentry-subscription-consumer-transactions.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sentry-subscription-consumer-transactions
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
annotations:
meta.helm.sh/release-name: "sentry"
meta.helm.sh/release-namespace: "infrastructure-prod"
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "10"
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
app: sentry
release: "sentry"
role: sentry-subscription-consumer-transactions
replicas: 1
template:
metadata:
annotations:
checksum/configYml: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
checksum/sentryConfPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: a5db579ea0417a1cd05eec3ab616b2f5f8bb0dbde1bc125fcb6d795914ba4b03
labels:
app: sentry
release: "sentry"
role: sentry-subscription-consumer-transactions
spec:
affinity:
containers:
- name: sentry-subscription-consumer-transactions
image: "getsentry/sentry:22.6.0"
imagePullPolicy: IfNotPresent
command: ["sentry"]
args:
- "run"
- "query-subscription-consumer"
- "--topic"
- "transactions-subscription-results"
- "--commit-batch-size"
- "1"
env:
- name: SNUBA
value: http://sentry-snuba:1218
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: sentry-sentry-postgresql
key: postgresql-password
volumeMounts:
- mountPath: /etc/sentry
name: config
readOnly: true
- mountPath: /var/lib/sentry/files
name: sentry-data
resources:
{}
volumes:
- name: config
configMap:
name: sentry-sentry
- name: sentry-data
emptyDir: {}
---
# Source: sentry/templates/deployment-snuba-consumer.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sentry-snuba-consumer
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
annotations:
meta.helm.sh/release-name: "sentry"
meta.helm.sh/release-namespace: "infrastructure-prod"
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "10"
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
app: sentry
release: "sentry"
role: snuba-consumer
replicas: 1
template:
metadata:
annotations:
checksum/snubaSettingsPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: 3451a0aac4b25b6e32f560c51b39a7064610e67eebb5d8cc6efaed7a7fa384b0
labels:
app: sentry
release: "sentry"
role: snuba-consumer
spec:
affinity:
containers:
- name: sentry-snuba
image: "getsentry/snuba:22.6.0"
imagePullPolicy: IfNotPresent
command:
- "snuba"
- "consumer"
- "--storage"
- "errors"
- "--auto-offset-reset"
- "earliest"
- "--max-batch-time-ms"
- "750"
ports:
- containerPort: 1218
env:
- name: SNUBA_SETTINGS
value: /etc/snuba/settings.py
- name: DEFAULT_BROKERS
value: "sentry-kafka:9092"
envFrom:
- secretRef:
name: sentry-snuba-env
volumeMounts:
- mountPath: /etc/snuba
name: config
readOnly: true
resources:
{}
volumes:
- name: config
configMap:
name: sentry-snuba
---
# Source: sentry/templates/deployment-snuba-outcomes-consumer.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sentry-snuba-outcomes-consumer
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
annotations:
meta.helm.sh/release-name: "sentry"
meta.helm.sh/release-namespace: "infrastructure-prod"
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "17"
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
app: sentry
release: "sentry"
role: snuba-outcomes-consumer
replicas: 1
template:
metadata:
annotations:
checksum/snubaSettingsPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: 3451a0aac4b25b6e32f560c51b39a7064610e67eebb5d8cc6efaed7a7fa384b0
labels:
app: sentry
release: "sentry"
role: snuba-outcomes-consumer
spec:
affinity:
containers:
- name: sentry-snuba
image: "getsentry/snuba:22.6.0"
imagePullPolicy: IfNotPresent
command:
- "snuba"
- "consumer"
- "--storage"
- "outcomes_raw"
- "--auto-offset-reset"
- "earliest"
- "--max-batch-size"
- "3"
ports:
- containerPort: 1218
env:
- name: SNUBA_SETTINGS
value: /etc/snuba/settings.py
- name: DEFAULT_BROKERS
value: "sentry-kafka:9092"
envFrom:
- secretRef:
name: sentry-snuba-env
volumeMounts:
- mountPath: /etc/snuba
name: config
readOnly: true
resources:
{}
volumes:
- name: config
configMap:
name: sentry-snuba
---
# Source: sentry/templates/deployment-snuba-replacer.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sentry-snuba-replacer
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
annotations:
meta.helm.sh/release-name: "sentry"
meta.helm.sh/release-namespace: "infrastructure-prod"
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "18"
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
app: sentry
release: "sentry"
role: snuba-replacer
replicas: 1
template:
metadata:
annotations:
checksum/snubaSettingsPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: 3451a0aac4b25b6e32f560c51b39a7064610e67eebb5d8cc6efaed7a7fa384b0
labels:
app: sentry
release: "sentry"
role: snuba-replacer
spec:
affinity:
containers:
- name: sentry-snuba
image: "getsentry/snuba:22.6.0"
imagePullPolicy: IfNotPresent
command:
- "snuba"
- "replacer"
- "--storage"
- "errors"
- "--auto-offset-reset"
- "earliest"
- "--max-batch-size"
- "3"
ports:
- containerPort: 1218
env:
- name: SNUBA_SETTINGS
value: /etc/snuba/settings.py
- name: DEFAULT_BROKERS
value: "sentry-kafka:9092"
envFrom:
- secretRef:
name: sentry-snuba-env
volumeMounts:
- mountPath: /etc/snuba
name: config
readOnly: true
resources:
{}
volumes:
- name: config
configMap:
name: sentry-snuba
---
# Source: sentry/templates/deployment-snuba-sessions-consumer.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sentry-sessions-consumer
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
annotations:
meta.helm.sh/release-name: "sentry"
meta.helm.sh/release-namespace: "infrastructure-prod"
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "16"
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
app: sentry
release: "sentry"
role: sessions-consumer
replicas: 1
template:
metadata:
annotations:
checksum/snubaSettingsPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: 3451a0aac4b25b6e32f560c51b39a7064610e67eebb5d8cc6efaed7a7fa384b0
labels:
app: sentry
release: "sentry"
role: sessions-consumer
spec:
affinity:
containers:
- name: sentry-snuba
image: "getsentry/snuba:22.6.0"
imagePullPolicy: IfNotPresent
command:
- "snuba"
- "consumer"
- "--storage"
- "sessions_raw"
- "--auto-offset-reset"
- "earliest"
- "--max-batch-time-ms"
- "750"
ports:
- containerPort: 1218
env:
- name: SNUBA_SETTINGS
value: /etc/snuba/settings.py
- name: DEFAULT_BROKERS
value: "sentry-kafka:9092"
envFrom:
- secretRef:
name: sentry-snuba-env
volumeMounts:
- mountPath: /etc/snuba
name: config
readOnly: true
resources:
{}
volumes:
- name: config
configMap:
name: sentry-snuba
---
# Source: sentry/templates/deployment-snuba-subscription-consumer-events.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sentry-snuba-subscription-consumer-events
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
annotations:
meta.helm.sh/release-name: "sentry"
meta.helm.sh/release-namespace: "infrastructure-prod"
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "18"
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
app: sentry
release: "sentry"
role: snuba-subscription-consumer-events
replicas: 1
template:
metadata:
annotations:
checksum/snubaSettingsPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: 3451a0aac4b25b6e32f560c51b39a7064610e67eebb5d8cc6efaed7a7fa384b0
labels:
app: sentry
release: "sentry"
role: snuba-subscription-consumer-events
spec:
affinity:
containers:
- name: sentry-snuba
image: "getsentry/snuba:22.6.0"
imagePullPolicy: IfNotPresent
command:
- "snuba"
- "subscriptions-scheduler-executor"
- "--auto-offset-reset=earliest"
- "--dataset=events"
- "--entity=events"
- "--no-strict-offset-reset"
- "--consumer-group=snuba-events-subscriptions-consumers"
- "--followed-consumer-group=snuba-consumers"
- "--delay-seconds=60"
- "--schedule-ttl=60"
- "--stale-threshold-seconds=900"
ports:
- containerPort: 1218
env:
- name: SNUBA_SETTINGS
value: /etc/snuba/settings.py
- name: DEFAULT_BROKERS
value: "sentry-kafka:9092"
envFrom:
- secretRef:
name: sentry-snuba-env
volumeMounts:
- mountPath: /etc/snuba
name: config
readOnly: true
resources:
{}
volumes:
- name: config
configMap:
name: sentry-snuba
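# The scheduler-executor paces subscription queries off the offsets committed by
# the "followed" consumer group: snuba-consumers here (the errors consumer's
# default group), and transactions_group for the transactions variant below,
# matching the --consumer-group passed to the transactions consumer.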
---
# Source: sentry/templates/deployment-snuba-subscription-consumer-transactions.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sentry-snuba-subscription-consumer-transactions
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
annotations:
meta.helm.sh/release-name: "sentry"
meta.helm.sh/release-namespace: "infrastructure-prod"
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "18"
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
app: sentry
release: "sentry"
role: snuba-subscription-consumer-transactions
replicas: 1
template:
metadata:
annotations:
checksum/snubaSettingsPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: 3451a0aac4b25b6e32f560c51b39a7064610e67eebb5d8cc6efaed7a7fa384b0
labels:
app: sentry
release: "sentry"
role: snuba-subscription-consumer-transactions
spec:
affinity:
containers:
- name: sentry-snuba
image: "getsentry/snuba:22.6.0"
imagePullPolicy: IfNotPresent
command:
- "snuba"
- "subscriptions-scheduler-executor"
- "--auto-offset-reset=earliest"
- "--dataset=transactions"
- "--entity=transactions"
- "--no-strict-offset-reset"
- "--consumer-group=snuba-transactions-subscriptions-consumers"
- "--followed-consumer-group=transactions_group"
- "--delay-seconds=60"
- "--schedule-ttl=60"
- "--stale-threshold-seconds=900"
ports:
- containerPort: 1218
env:
- name: SNUBA_SETTINGS
value: /etc/snuba/settings.py
- name: DEFAULT_BROKERS
value: "sentry-kafka:9092"
envFrom:
- secretRef:
name: sentry-snuba-env
volumeMounts:
- mountPath: /etc/snuba
name: config
readOnly: true
resources:
{}
volumes:
- name: config
configMap:
name: sentry-snuba
---
# Source: sentry/templates/deployment-snuba-transactions-consumer.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sentry-snuba-transactions-consumer
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
annotations:
meta.helm.sh/release-name: "sentry"
meta.helm.sh/release-namespace: "infrastructure-prod"
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "12"
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
app: sentry
release: "sentry"
role: snuba-transactions-consumer
replicas: 1
template:
metadata:
annotations:
checksum/snubaSettingsPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: 3451a0aac4b25b6e32f560c51b39a7064610e67eebb5d8cc6efaed7a7fa384b0
labels:
app: sentry
release: "sentry"
role: snuba-transactions-consumer
spec:
affinity:
containers:
- name: sentry-snuba
image: "getsentry/snuba:22.6.0"
imagePullPolicy: IfNotPresent
command:
- "snuba"
- "consumer"
- "--storage"
- "transactions"
- "--consumer-group"
- "transactions_group"
- "--auto-offset-reset"
- "earliest"
- "--max-batch-time-ms"
- "750"
ports:
- containerPort: 1218
env:
- name: SNUBA_SETTINGS
value: /etc/snuba/settings.py
- name: DEFAULT_BROKERS
value: "sentry-kafka:9092"
envFrom:
- secretRef:
name: sentry-snuba-env
volumeMounts:
- mountPath: /etc/snuba
name: config
readOnly: true
resources:
{}
volumes:
- name: config
configMap:
name: sentry-snuba
---
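# Hook ordering: Helm sorts hooks by "helm.sh/hook-weight", so on each
# install/upgrade the jobs run as db-check (-1), snuba-db-init (3),
# snuba-migrate (5), sentry-db-init (6), user-create (9), before the consumer
# Deployments (weights 10-18) and relay (25) above are created.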
# Source: sentry/templates/hooks/sentry-db-check.job.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: sentry-db-check
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation"
"helm.sh/hook-weight": "-1"
spec:
activeDeadlineSeconds: 100
template:
metadata:
name: sentry-db-check
annotations:
labels:
app: sentry
release: "sentry"
spec:
restartPolicy: Never
containers:
- name: db-check
image: subfuzion/netcat:latest
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- |
echo "Checking if clickhouse is up"
CLICKHOUSE_STATUS=0
while [ $CLICKHOUSE_STATUS -eq 0 ]; do
CLICKHOUSE_STATUS=1
CLICKHOUSE_REPLICAS=3
i=0; while [ $i -lt $CLICKHOUSE_REPLICAS ]; do
CLICKHOUSE_HOST=sentry-clickhouse-$i.sentry-clickhouse-headless
if ! nc -z "$CLICKHOUSE_HOST" 9000; then
CLICKHOUSE_STATUS=0
echo "$CLICKHOUSE_HOST is not available yet"
fi
i=$((i+1))
done
if [ "$CLICKHOUSE_STATUS" -eq 0 ]; then
echo "Clickhouse not ready. Sleeping for 10s before trying again"
sleep 10;
fi
done
echo "Clickhouse is up"
echo "Checking if kafka is up"
KAFKA_STATUS=0
while [ $KAFKA_STATUS -eq 0 ]; do
KAFKA_STATUS=1
KAFKA_REPLICAS=3
i=0; while [ $i -lt $KAFKA_REPLICAS ]; do
KAFKA_HOST=sentry-kafka-$i.sentry-kafka-headless
if ! nc -z "$KAFKA_HOST" 9092; then
KAFKA_STATUS=0
echo "$KAFKA_HOST is not available yet"
fi
i=$((i+1))
done
if [ "$KAFKA_STATUS" -eq 0 ]; then
echo "Kafka not ready. Sleeping for 10s before trying again"
sleep 10;
fi
done
echo "Kafka is up"
env:
resources:
limits:
memory: 64Mi
requests:
cpu: 100m
memory: 64Mi
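# db-check polls every ClickHouse and Kafka replica with "nc -z", sleeping 10s
# between rounds; activeDeadlineSeconds: 100 caps the whole wait, so if either
# dependency is not reachable within ~100s this hook job, and with it the
# install/upgrade, fails.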
---
# Source: sentry/templates/hooks/sentry-db-init.job.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: sentry-db-init
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation"
"helm.sh/hook-weight": "6"
spec:
activeDeadlineSeconds: 100
template:
metadata:
name: sentry-db-init
annotations:
checksum/configmap.yaml: fb7d44624e2145432e9057a15894e651b1b044578d879cb63b86b6e830684673
labels:
app: sentry
release: "sentry"
spec:
restartPolicy: Never
containers:
- name: db-init-job
image: "getsentry/sentry:22.6.0"
imagePullPolicy: IfNotPresent
command: ["sentry","upgrade","--noinput"]
env:
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: sentry-sentry-postgresql
key: postgresql-password
volumeMounts:
- mountPath: /etc/sentry
name: config
readOnly: true
resources:
limits:
memory: 2048Mi
requests:
cpu: 300m
memory: 2048Mi
volumes:
- name: config
configMap:
name: sentry-sentry
---
# Source: sentry/templates/hooks/snuba-db-init.job.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: sentry-snuba-db-init
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation"
"helm.sh/hook-weight": "3"
spec:
activeDeadlineSeconds: 100
template:
metadata:
name: sentry-snuba-db-init
annotations:
checksum/snubaSettingsPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: 3451a0aac4b25b6e32f560c51b39a7064610e67eebb5d8cc6efaed7a7fa384b0
labels:
app: sentry
release: "sentry"
spec:
restartPolicy: Never
containers:
- name: snuba-init
image: "getsentry/snuba:22.6.0"
command: [snuba, bootstrap, --no-migrate, --force]
env:
- name: LOG_LEVEL
value: debug
- name: SNUBA_SETTINGS
value: /etc/snuba/settings.py
- name: DEFAULT_BROKERS
value: "sentry-kafka:9092"
envFrom:
- secretRef:
name: sentry-snuba-env
volumeMounts:
- mountPath: /etc/snuba
name: config
readOnly: true
resources:
limits:
cpu: 2000m
memory: 1Gi
requests:
cpu: 700m
memory: 1Gi
volumes:
- name: config
configMap:
name: sentry-snuba
---
# Source: sentry/templates/hooks/snuba-migrate.job.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: sentry-snuba-migrate
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation"
"helm.sh/hook-weight": "5"
spec:
activeDeadlineSeconds: 100
template:
metadata:
name: sentry-snuba-migrate
annotations:
checksum/snubaSettingsPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: 3451a0aac4b25b6e32f560c51b39a7064610e67eebb5d8cc6efaed7a7fa384b0
labels:
app: sentry
release: "sentry"
spec:
restartPolicy: Never
containers:
- name: snuba-migrate
image: "getsentry/snuba:22.6.0"
command: [snuba, migrations, migrate, --force]
env:
- name: LOG_LEVEL
value: debug
- name: SNUBA_SETTINGS
value: /etc/snuba/settings.py
- name: DEFAULT_BROKERS
value: "sentry-kafka:9092"
envFrom:
- secretRef:
name: sentry-snuba-env
volumeMounts:
- mountPath: /etc/snuba
name: config
readOnly: true
resources:
limits:
cpu: 2000m
memory: 1Gi
requests:
cpu: 700m
memory: 1Gi
volumes:
- name: config
configMap:
name: sentry-snuba
---
# Source: sentry/templates/hooks/user-create.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: sentry-user-create
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
annotations:
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation"
"helm.sh/hook-weight": "9"
spec:
activeDeadlineSeconds: 100
template:
metadata:
name: sentry-user-create
annotations:
checksum/configmap.yaml: 8fefcaa3b0d6bee17392d0c8173d88c67d4b4a060fb3e50b85088306a55e6095
labels:
app: sentry
release: "sentry"
spec:
restartPolicy: Never
containers:
- name: user-create-job
image: "getsentry/sentry:22.6.0"
imagePullPolicy: IfNotPresent
command: ["/bin/bash", "-c"]
# Create user but do not exit 1 when user already exists (exit code 3 from createuser command)
# https://docs.sentry.io/server/cli/createuser/
args:
- >
sentry createuser \
--no-input \
--superuser \
--email "admin@sentry.local" \
--password "$ADMIN_PASSWORD" || true; \
if [ $? -eq 0 ] || [ $? -eq 3 ]; then \
exit 0; \
else \
exit 1; \
fi
env:
- name: ADMIN_PASSWORD
value: "aaaa"
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: sentry-sentry-postgresql
key: postgresql-password
volumeMounts:
- mountPath: /etc/sentry
name: config
readOnly: true
resources:
limits:
memory: 2048Mi
requests:
cpu: 300m
memory: 2048Mi
volumes:
- name: config
configMap:
name: sentry-sentry
MANIFEST:
---
# Source: sentry/charts/rabbitmq/templates/pdb.yaml
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: sentry-rabbitmq
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: rabbitmq
helm.sh/chart: rabbitmq-8.32.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
spec:
minAvailable: 1
selector:
matchLabels:
app.kubernetes.io/name: rabbitmq
app.kubernetes.io/instance: sentry
---
# Source: sentry/charts/kafka/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: sentry-kafka
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: kafka
helm.sh/chart: kafka-16.3.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kafka
annotations:
automountServiceAccountToken: true
---
# Source: sentry/charts/rabbitmq/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: sentry-rabbitmq
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: rabbitmq
helm.sh/chart: rabbitmq-8.32.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
automountServiceAccountToken: true
secrets:
- name: sentry-rabbitmq
---
# Source: sentry/charts/redis/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: sentry-sentry-redis
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: sentry-redis
helm.sh/chart: redis-16.12.1
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
---
# Source: sentry/charts/postgresql/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
name: sentry-sentry-postgresql
labels:
app.kubernetes.io/name: sentry-postgresql
helm.sh/chart: postgresql-10.16.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
namespace: infrastructure-prod
type: Opaque
data:
postgresql-password: "RDBaOTQzdG0yMw=="
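# Secret data is only base64-encoded, not encrypted; a value can be read back
# with (assuming kubectl access to the namespace):
#   kubectl -n infrastructure-prod get secret sentry-sentry-postgresql \
#     -o jsonpath='{.data.postgresql-password}' | base64 -d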
---
# Source: sentry/charts/rabbitmq/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
name: sentry-rabbitmq
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: rabbitmq
helm.sh/chart: rabbitmq-8.32.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
type: Opaque
data:
rabbitmq-password: "Z3Vlc3Q="
rabbitmq-erlang-cookie: "cEhncHkzUTZhZFRza3pBVDZiTEhDRnFGVEY3bE14aEE="
---
# Source: sentry/charts/rabbitmq/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
name: load-definition
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: rabbitmq
helm.sh/chart: rabbitmq-8.32.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
type: Opaque
stringData:
load_definition.json: |
{
"users": [
{
"name": "guest",
"password": "guest",
"tags": "administrator"
}
],
"permissions": [{
"user": "guest",
"vhost": "/",
"configure": ".*",
"write": ".*",
"read": ".*"
}],
"policies": [
{
"name": "ha-all",
"pattern": ".*",
"vhost": "/",
"definition": {
"ha-mode": "all",
"ha-sync-mode": "automatic",
"ha-sync-batch-size": 1
}
}
],
"vhosts": [
{
"name": "/"
}
]
}
---
# Source: sentry/templates/secret-snuba-env.yaml
apiVersion: v1
kind: Secret
metadata:
name: sentry-snuba-env
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
type: Opaque
data:
CLICKHOUSE_DATABASE: "ZGVmYXVsdA=="
CLICKHOUSE_USER: "ZGVmYXVsdA=="
CLICKHOUSE_PASSWORD: ""
---
# Source: sentry/charts/clickhouse/templates/configmap-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sentry-clickhouse-config
labels:
app.kubernetes.io/name: clickhouse-config
app.kubernetes.io/instance: sentry-config
app.kubernetes.io/managed-by: Helm
data:
config.xml: |-
<?xml version="1.0"?>
<yandex>
<path>/var/lib/clickhouse/</path>
<tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
<user_files_path>/var/lib/clickhouse/user_files/</user_files_path>
<format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>
<include_from>/etc/clickhouse-server/metrica.d/metrica.xml</include_from>
<users_config>users.xml</users_config>
<display_name>sentry-clickhouse</display_name>
<listen_host>0.0.0.0</listen_host>
<http_port>8123</http_port>
<tcp_port>9000</tcp_port>
<interserver_http_port>9009</interserver_http_port>
<max_connections>4096</max_connections>
<keep_alive_timeout>3</keep_alive_timeout>
<max_concurrent_queries>100</max_concurrent_queries>
<uncompressed_cache_size>8589934592</uncompressed_cache_size>
<mark_cache_size>5368709120</mark_cache_size>
<timezone>UTC</timezone>
<umask>022</umask>
<mlock_executable>false</mlock_executable>
<remote_servers incl="clickhouse_remote_servers" optional="true" />
<zookeeper incl="zookeeper-servers" optional="true" />
<macros incl="macros" optional="true" />
<builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
<max_session_timeout>3600</max_session_timeout>
<default_session_timeout>60</default_session_timeout>
<disable_internal_dns_cache>1</disable_internal_dns_cache>
<query_log>
<database>system</database>
<table>query_log</table>
<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</query_log>
<query_thread_log>
<database>system</database>
<table>query_thread_log</table>
<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</query_thread_log>
<distributed_ddl>
<path>/clickhouse/task_queue/ddl</path>
</distributed_ddl>
<logger>
<level>trace</level>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<size>1000M</size>
<count>10</count>
</logger>
</yandex>
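    <!-- The incl="clickhouse_remote_servers", incl="zookeeper-servers" and
         incl="macros" attributes above are substituted from the file named in
         <include_from>, i.e. the metrica.xml supplied by the
         sentry-clickhouse-metrica ConfigMap below. -->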
---
# Source: sentry/charts/clickhouse/templates/configmap-metrika.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sentry-clickhouse-metrica
labels:
app.kubernetes.io/name: clickhouse-metrica
app.kubernetes.io/instance: sentry-metrica
app.kubernetes.io/managed-by: Helm
data:
metrica.xml: |-
<?xml version="1.0"?>
<yandex>
<zookeeper-servers>
<node index="clickhouse">
<host>sentry-zookeeper-clickhouse</host>
<port>2181</port>
</node>
<session_timeout_ms>30000</session_timeout_ms>
<operation_timeout_ms>10000</operation_timeout_ms>
<root></root>
<identity></identity>
</zookeeper-servers>
<clickhouse_remote_servers>
<sentry-clickhouse>
<shard>
<replica>
<internal_replication>true</internal_replication>
<host>sentry-clickhouse-0.sentry-clickhouse-headless.infrastructure-prod.svc.cluster.local</host>
<port>9000</port>
<user>default</user>
<compression>true</compression>
</replica>
</shard>
<shard>
<replica>
<internal_replication>true</internal_replication>
<host>sentry-clickhouse-1.sentry-clickhouse-headless.infrastructure-prod.svc.cluster.local</host>
<port>9000</port>
<user>default</user>
<compression>true</compression>
</replica>
</shard>
<shard>
<replica>
<internal_replication>true</internal_replication>
<host>sentry-clickhouse-2.sentry-clickhouse-headless.infrastructure-prod.svc.cluster.local</host>
<port>9000</port>
<user>default</user>
<compression>true</compression>
</replica>
</shard>
</sentry-clickhouse>
</clickhouse_remote_servers>
<macros>
<replica from_env="HOSTNAME"></replica>
<shard from_env="SHARD"></shard>
</macros>
</yandex>
---
# Source: sentry/charts/clickhouse/templates/configmap-users.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sentry-clickhouse-users
labels:
app.kubernetes.io/name: clickhouse-users
app.kubernetes.io/instance: sentry-users
app.kubernetes.io/managed-by: Helm
data:
users.xml: |-
<?xml version="1.0"?>
<yandex>
</yandex>
---
# Source: sentry/charts/kafka/charts/zookeeper/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sentry-zookeeper-scripts
namespace: infrastructure-prod
labels:
app.kubernetes.io/name: zookeeper
helm.sh/chart: zookeeper-9.1.5
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: zookeeper
data:
init-certs.sh: |-
#!/bin/bash
setup.sh: |-
#!/bin/bash
# Execute entrypoint as usual after obtaining ZOO_SERVER_ID
# check ZOO_SERVER_ID in persistent volume via myid
# if not present, set based on POD hostname
if [[ -f "/bitnami/zookeeper/data/myid" ]]; then
export ZOO_SERVER_ID="$(cat /bitnami/zookeeper/data/myid)"
else
HOSTNAME="$(hostname -s)"
if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
ORD=${BASH_REMATCH[2]}
export ZOO_SERVER_ID="$((ORD + 1 ))"
else
echo "Failed to get index from hostname $HOST"
exit 1
fi
fi
exec /entrypoint.sh /run.sh
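    # Mapping example: pod sentry-zookeeper-0 -> ZOO_SERVER_ID=1,
    # sentry-zookeeper-1 -> ZOO_SERVER_ID=2, and so on; the +1 offset exists
    # because ZooKeeper server ids must be >= 1 while StatefulSet ordinals
    # start at 0.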
---
# Source: sentry/charts/kafka/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sentry-kafka-scripts
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: kafka
helm.sh/chart: kafka-16.3.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
data:
setup.sh: |-
#!/bin/bash
ID="${MY_POD_NAME#"sentry-kafka-"}"
if [[ -f "/bitnami/kafka/data/meta.properties" ]]; then
export KAFKA_CFG_BROKER_ID="$(grep "broker.id" /bitnami/kafka/data/meta.properties | awk -F '=' '{print $2}')"
else
export KAFKA_CFG_BROKER_ID="$((ID + 0))"
fi
# Configure zookeeper client
exec /entrypoint.sh /run.sh
---
# Source: sentry/charts/rabbitmq/templates/configuration.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sentry-rabbitmq-config
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: rabbitmq
helm.sh/chart: rabbitmq-8.32.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
data:
rabbitmq.conf: |-
## Username and password
##
default_user = guest
default_pass = CHANGEME
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
load_definitions = /app/load_definition.json
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
load_definitions = /app/load_definition.json
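    ## Note: load_definitions is set twice in this rendered file (once by the
    ## base configuration, once by the appended extra configuration); both
    ## point at the same path, so the duplicate is redundant but harmless.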
---
# Source: sentry/charts/redis/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sentry-sentry-redis-configuration
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: sentry-redis
helm.sh/chart: redis-16.12.1
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: sentry/charts/redis/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sentry-sentry-redis-health
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: sentry-redis
helm.sh/chart: redis-16.12.1
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$?" -eq "124" ]; then
echo "Timed out"
exit 1
fi
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$?" -eq "124" ]; then
echo "Timed out"
exit 1
fi
responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}')
if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ] && [ "$responseFirstWord" != "MASTERDOWN" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$?" -eq "124" ]; then
echo "Timed out"
exit 1
fi
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$?" -eq "124" ]; then
echo "Timed out"
exit 1
fi
responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}')
if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: sentry/charts/redis/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sentry-sentry-redis-scripts
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: sentry-redis
helm.sh/chart: redis-16.12.1
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--protected-mode" "no")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--protected-mode" "no")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: sentry/charts/zookeeper/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sentry-zookeeper-clickhouse-scripts
namespace: infrastructure-prod
labels:
app.kubernetes.io/name: zookeeper-clickhouse
helm.sh/chart: zookeeper-9.0.0
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: zookeeper
data:
init-certs.sh: |-
#!/bin/bash
setup.sh: |-
#!/bin/bash
# Execute entrypoint as usual after obtaining ZOO_SERVER_ID
# check ZOO_SERVER_ID in persistent volume via myid
# if not present, set based on POD hostname
if [[ -f "/bitnami/zookeeper/data/myid" ]]; then
export ZOO_SERVER_ID="$(cat /bitnami/zookeeper/data/myid)"
else
HOSTNAME="$(hostname -s)"
if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
ORD=${BASH_REMATCH[2]}
export ZOO_SERVER_ID="$((ORD + 1 ))"
else
echo "Failed to get index from hostname $HOST"
exit 1
fi
fi
exec /entrypoint.sh /run.sh
---
# Source: sentry/templates/configmap-memcached.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sentry-memcached
data:
MEMCACHED_MEMORY_LIMIT: "2048"
MEMCACHED_MAX_ITEM_SIZE: "26214400"
---
# Source: sentry/templates/configmap-nginx.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sentry-nginx
data:
server-block.conf: |
upstream relay {
server sentry-relay:3000;
}
upstream sentry {
server sentry-web:9000;
}
server {
listen 8080;
proxy_redirect off;
proxy_set_header Host $host;
location /api/store/ {
proxy_pass http://relay;
}
location ~ ^/api/[1-9]\d*/ {
proxy_pass http://relay;
}
location / {
proxy_pass http://sentry;
}
}
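    # Routing: /api/store/ and any numeric project endpoint (the ^/api/[1-9]\d*/
    # regex, which deliberately excludes the internal /api/0/ API) are proxied
    # to Relay; everything else, including the UI, goes to sentry-web on 9000.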
---
# Source: sentry/templates/configmap-relay.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sentry-relay
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
data:
config.yml: |-
relay:
mode: managed
upstream: "http://sentry-web:9000/"
host: 0.0.0.0
port: 3000
processing:
enabled: true
kafka_config:
- name: "bootstrap.servers"
value: "sentry-kafka:9092"
- name: "message.max.bytes"
value: 50000000 # 50MB or bust
redis: "redis://:@sentry-sentry-redis-master:6379"
# No YAML relay config given
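    # processing.enabled: true makes this Relay produce events straight into
    # Kafka and use Redis for project configs and quotas; the "redis://:@host"
    # form is an empty-username/empty-password URL, matching the passwordless
    # Redis in this release.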
---
# Source: sentry/templates/configmap-sentry.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sentry-sentry
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
data:
config.yml: |-
system.secret-key: "mRw7XE9c4PMhiKw9kLGZUaKr8QcNJPZ4R7C2Z2CBIVgjeivnDN"
# This URL will be used to tell Symbolicator where to obtain the Sentry source.
# See https://getsentry.github.io/symbolicator/api/
system.internal-url-prefix: 'http://sentry-web:9000'
symbolicator.enabled: false
##########
# Github #
##########
##########
# Google #
##########
#########
# Slack #
#########
#########
# Redis #
#########
redis.clusters:
default:
hosts:
0:
host: "sentry-sentry-redis-master"
port: 6379
password: ""
################
# File storage #
################
# Uploaded media uses these `filestore` settings. The available
# backends are either `filesystem` or `s3`.
filestore.backend: "filesystem"
filestore.options:
location: "/var/lib/sentry/files"
sentry.conf.py: |-
from sentry.conf.server import * # NOQA
from distutils.util import strtobool
DATABASES = {
"default": {
"ENGINE": "sentry.db.postgres",
"NAME": "sentry",
"USER": "postgres",
"PASSWORD": os.environ.get("POSTGRES_PASSWORD", ""),
"HOST": "sentry-sentry-postgresql",
"PORT": 5432,
}
}
# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = True
###########
# General #
###########
# Instruct Sentry that this install intends to be run by a single organization
# and thus various UI optimizations should be enabled.
SENTRY_SINGLE_ORGANIZATION = True
SENTRY_OPTIONS["system.event-retention-days"] = int(env('SENTRY_EVENT_RETENTION_DAYS') or "90")
#########
# Queue #
#########
# See https://docs.getsentry.com/on-premise/server/queue/ for more
# information on configuring your queue broker and workers. Sentry relies
# on a Python framework called Celery to manage queues.
BROKER_URL = os.environ.get("BROKER_URL", "amqp://guest:guest@sentry-rabbitmq:5672//")
#########
# Cache #
#########
# Sentry currently utilizes two separate mechanisms. While CACHES is not a
# requirement, it will optimize several high throughput patterns.
# CACHES = {
# "default": {
# "BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
# "LOCATION": ["memcached:11211"],
# "TIMEOUT": 3600,
# }
# }
# A primary cache is required for things such as processing events
SENTRY_CACHE = "sentry.cache.redis.RedisCache"
DEFAULT_KAFKA_OPTIONS = {
"bootstrap.servers": "sentry-kafka:9092",
"message.max.bytes": 50000000,
"socket.timeout.ms": 1000,
}
SENTRY_EVENTSTREAM = "sentry.eventstream.kafka.KafkaEventStream"
SENTRY_EVENTSTREAM_OPTIONS = {"producer_configuration": DEFAULT_KAFKA_OPTIONS}
KAFKA_CLUSTERS["default"] = DEFAULT_KAFKA_OPTIONS
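    # The 50MB message.max.bytes here mirrors the relay ConfigMap's
    # kafka_config; the brokers/topics must allow the same size
    # (max.message.bytes), or oversized events are rejected at produce time.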
###############
# Rate Limits #
###############
# Rate limits apply to notification handlers and are enforced per-project
# automatically.
SENTRY_RATELIMITER = "sentry.ratelimits.redis.RedisRateLimiter"
##################
# Update Buffers #
##################
# Buffers (combined with queueing) act as an intermediate layer between the
# database and the storage API. They will greatly improve efficiency on large
# numbers of the same events being sent to the API in a short amount of time.
# (read: if you send any kind of real data to Sentry, you should enable buffers)
SENTRY_BUFFER = "sentry.buffer.redis.RedisBuffer"
##########
# Quotas #
##########
# Quotas allow you to rate limit individual projects or the Sentry install as
# a whole.
SENTRY_QUOTAS = "sentry.quotas.redis.RedisQuota"
########
# TSDB #
########
# The TSDB is used for building charts as well as making things like per-rate
# alerts possible.
SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB"
#########
# SNUBA #
#########
SENTRY_SEARCH = "sentry.search.snuba.EventsDatasetSnubaSearchBackend"
SENTRY_SEARCH_OPTIONS = {}
SENTRY_TAGSTORE_OPTIONS = {}
###########
# Digests #
###########
# The digest backend powers notification summaries.
SENTRY_DIGESTS = "sentry.digests.backends.redis.RedisBackend"
##############
# Web Server #
##############
SENTRY_WEB_HOST = "0.0.0.0"
SENTRY_WEB_PORT = 9000
SENTRY_PUBLIC = False
SENTRY_WEB_OPTIONS = {
"http": "%s:%s" % (SENTRY_WEB_HOST, SENTRY_WEB_PORT),
"protocol": "uwsgi",
# This is needed to prevent https://git.io/fj7Lw
"uwsgi-socket": None,
# These are for proper HTTP/1.1 support from uWSGI
# Without these it doesn't do keep-alives causing
# issues with Relay's direct requests.
"http-keepalive": True,
"http-chunked-input": True,
# the number of web workers
'workers': 3,
# Turn off memory reporting
"memory-report": False,
# Some stuff so uwsgi will cycle workers sensibly
'max-requests': 100000,
'max-requests-delta': 500,
'max-worker-lifetime': 86400,
# Duplicate options from sentry default just so we don't get
# bit by sentry changing a default value that we depend on.
'thunder-lock': True,
'log-x-forwarded-for': False,
'buffer-size': 32768,
'limit-post': 209715200,
'disable-logging': True,
'reload-on-rss': 600,
'ignore-sigpipe': True,
'ignore-write-errors': True,
'disable-write-exception': True,
}
###########
# SSL/TLS #
###########
# If you're using a reverse SSL proxy, you should enable the X-Forwarded-Proto
# header and enable the settings below
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# SESSION_COOKIE_SECURE = True
# CSRF_COOKIE_SECURE = True
# SOCIAL_AUTH_REDIRECT_IS_HTTPS = True
# End of SSL/TLS settings
############
# Features #
############
SENTRY_FEATURES = {
"auth:register": True
}
SENTRY_FEATURES["projects:sample-events"] = False
SENTRY_FEATURES.update(
{
feature: True
for feature in ("organizations:advanced-search",
"organizations:android-mappings",
"organizations:api-keys",
"organizations:boolean-search",
"organizations:related-events",
"organizations:alert-filters",
"organizations:custom-symbol-sources",
"organizations:dashboards-basic",
"organizations:dashboards-edit",
"organizations:data-forwarding",
"organizations:discover",
"organizations:discover-basic",
"organizations:discover-query",
"organizations:enterprise-perf",
"organizations:event-attachments",
"organizations:events",
"organizations:global-views",
"organizations:incidents",
"organizations:metric-alert-builder-aggregate",
"organizations:metric-alert-gui-filters",
"organizations:integrations-event-hooks",
"organizations:integrations-issue-basic",
"organizations:integrations-issue-sync",
"organizations:integrations-alert-rule",
"organizations:integrations-chat-unfurl",
"organizations:integrations-incident-management",
"organizations:integrations-ticket-rules",
"organizations:integrations-vsts-limited-scopes",
"organizations:integrations-stacktrace-link",
"organizations:internal-catchall",
"organizations:invite-members",
"organizations:large-debug-files",
"organizations:monitors",
"organizations:onboarding",
"organizations:org-saved-searches",
"organizations:performance-view",
"organizations:project-detail",
"organizations:relay",
"organizations:release-performance-views",
"organizations:rule-page",
"organizations:set-grouping-config",
"organizations:custom-event-title",
"organizations:slack-migration",
"organizations:sso-basic",
"organizations:sso-rippling",
"organizations:sso-saml2",
"organizations:sso-migration",
"organizations:stacktrace-hover-preview",
"organizations:symbol-sources",
"organizations:transaction-comparison",
"organizations:usage-stats-graph",
"organizations:inbox",
"organizations:unhandled-issue-flag",
"organizations:invite-members-rate-limits",
"organizations:dashboards-v2",
"projects:alert-filters",
"projects:custom-inbound-filters",
"projects:data-forwarding",
"projects:discard-groups",
"projects:issue-alerts-targeting",
"projects:minidump",
"projects:rate-limits",
"projects:sample-events",
"projects:servicehooks",
"projects:similarity-view",
"projects:similarity-indexing",
"projects:similarity-view-v2",
"projects:similarity-indexing-v2",
"projects:reprocessing-v2",
"projects:plugins",
)
}
)
#######################
# Email Configuration #
#######################
SENTRY_OPTIONS['mail.backend'] = os.getenv("SENTRY_EMAIL_BACKEND", "dummy")
SENTRY_OPTIONS['mail.use-tls'] = bool(strtobool(os.getenv("SENTRY_EMAIL_USE_TLS", "false")))
SENTRY_OPTIONS['mail.use-ssl'] = bool(strtobool(os.getenv("SENTRY_EMAIL_USE_SSL", "false")))
SENTRY_OPTIONS['mail.username'] = os.getenv("SENTRY_EMAIL_USERNAME", "")
SENTRY_OPTIONS['mail.password'] = os.getenv("SENTRY_EMAIL_PASSWORD", "")
SENTRY_OPTIONS['mail.port'] = int(os.getenv("SENTRY_EMAIL_PORT", "25"))
SENTRY_OPTIONS['mail.host'] = os.getenv("SENTRY_EMAIL_HOST", "")
SENTRY_OPTIONS['mail.from'] = os.getenv("SENTRY_EMAIL_FROM", "")
#########################
# Bitbucket Integration #
#########################
# BITBUCKET_CONSUMER_KEY = 'YOUR_BITBUCKET_CONSUMER_KEY'
# BITBUCKET_CONSUMER_SECRET = 'YOUR_BITBUCKET_CONSUMER_SECRET'
#########
# Relay #
#########
SENTRY_RELAY_WHITELIST_PK = []
SENTRY_RELAY_OPEN_REGISTRATION = True
# No Python Extension Config Given
---
# Source: sentry/templates/configmap-snuba.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sentry-snuba
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
data:
settings.py: |
import os
from snuba.settings import *
env = os.environ.get
DEBUG = env("DEBUG", "0").lower() in ("1", "true")
# Clickhouse Options
CLUSTERS = [
{
"host": env("CLICKHOUSE_HOST", "sentry-clickhouse"),
"port": int(9000),
"user": env("CLICKHOUSE_USER", "default"),
"password": env("CLICKHOUSE_PASSWORD", ""),
"database": env("CLICKHOUSE_DATABASE", "default"),
"http_port": 8123,
"storage_sets": {
"cdc",
"discover",
"events",
"events_ro",
"metrics",
"migrations",
"outcomes",
"querylog",
"sessions",
"transactions",
"transactions_ro",
"transactions_v2",
"errors_v2",
"errors_v2_ro",
"profiles",
"replays",
"generic_metrics_sets",
},
"single_node": False,
"cluster_name": "sentry-clickhouse",
"distributed_cluster_name": "sentry-clickhouse",
},
]
# Redis Options
REDIS_HOST = "sentry-sentry-redis-master"
REDIS_PORT = 6379
REDIS_PASSWORD = ""
REDIS_DB = int(env("REDIS_DB", 1))
# No Python Extension Config Given
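    # single_node: False means snuba issues distributed queries against the
    # cluster named "sentry-clickhouse", which must match the
    # <clickhouse_remote_servers> entry (3 shards x 1 replica each) defined in
    # the sentry-clickhouse-metrica ConfigMap above.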
---
# Source: sentry/templates/pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: sentry-data
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
---
# Source: sentry/charts/rabbitmq/templates/role.yaml
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: sentry-rabbitmq-endpoint-reader
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: rabbitmq
helm.sh/chart: rabbitmq-8.32.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
---
# Source: sentry/charts/rabbitmq/templates/rolebinding.yaml
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: sentry-rabbitmq-endpoint-reader
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: rabbitmq
helm.sh/chart: rabbitmq-8.32.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
subjects:
- kind: ServiceAccount
name: sentry-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: sentry-rabbitmq-endpoint-reader
---
# Source: sentry/charts/clickhouse/templates/svc-clickhouse-headless.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-clickhouse-headless
labels:
app.kubernetes.io/name: clickhouse-headless
app.kubernetes.io/instance: sentry-headless
app.kubernetes.io/managed-by: Helm
spec:
clusterIP: "None"
ports:
- port: 9000
targetPort: tcp-port
protocol: TCP
name: tcp-port
- port: 8123
targetPort: http-port
protocol: TCP
name: http-port
- port: 9009
targetPort: inter-http-port
protocol: TCP
name: inter-http-port
selector:
app.kubernetes.io/name: clickhouse
app.kubernetes.io/instance: sentry
---
# Source: sentry/charts/clickhouse/templates/svc-clickhouse.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-clickhouse
labels:
app.kubernetes.io/name: clickhouse
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
spec:
ports:
- port: 9000
targetPort: tcp-port
protocol: TCP
name: tcp-port
- port: 8123
targetPort: http-port
protocol: TCP
name: http-port
- port: 9009
targetPort: inter-http-port
protocol: TCP
name: inter-http-port
selector:
app.kubernetes.io/name: clickhouse
app.kubernetes.io/instance: sentry
---
# Source: sentry/charts/kafka/charts/zookeeper/templates/svc-headless.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-zookeeper-headless
namespace: infrastructure-prod
labels:
app.kubernetes.io/name: zookeeper
helm.sh/chart: zookeeper-9.1.5
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: zookeeper
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: tcp-client
port: 2181
targetPort: client
- name: tcp-follower
port: 2888
targetPort: follower
- name: tcp-election
port: 3888
targetPort: election
selector:
app.kubernetes.io/name: zookeeper
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: zookeeper
---
# Source: sentry/charts/kafka/charts/zookeeper/templates/svc.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-zookeeper
namespace: infrastructure-prod
labels:
app.kubernetes.io/name: zookeeper
helm.sh/chart: zookeeper-9.1.5
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: zookeeper
spec:
type: ClusterIP
sessionAffinity: None
ports:
- name: tcp-client
port: 2181
targetPort: client
nodePort: null
- name: tcp-follower
port: 2888
targetPort: follower
- name: tcp-election
port: 3888
targetPort: election
selector:
app.kubernetes.io/name: zookeeper
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: zookeeper
---
# Source: sentry/charts/kafka/templates/svc-headless.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-kafka-headless
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: kafka
helm.sh/chart: kafka-16.3.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kafka
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-client
port: 9092
protocol: TCP
targetPort: kafka-client
- name: tcp-internal
port: 9093
protocol: TCP
targetPort: kafka-internal
selector:
app.kubernetes.io/name: kafka
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: kafka
---
# Source: sentry/charts/kafka/templates/svc.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-kafka
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: kafka
helm.sh/chart: kafka-16.3.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kafka
spec:
type: ClusterIP
sessionAffinity: None
ports:
- name: tcp-client
port: 9092
protocol: TCP
targetPort: kafka-client
nodePort: null
selector:
app.kubernetes.io/name: kafka
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: kafka
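
sentry-kafka-headless backs the per-broker DNS names that KAFKA_CFG_ADVERTISED_LISTENERS references further down, and sentry-kafka is the bootstrap address for clients on 9092. One way to verify the brokers are reachable is to list topics from inside a broker pod (the Bitnami image ships the Kafka CLI tools on the PATH):

kubectl -n infrastructure-prod exec -it sentry-kafka-0 -- kafka-topics.sh --bootstrap-server sentry-kafka:9092 --list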
---
# Source: sentry/charts/nginx/templates/svc.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-nginx
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: nginx
helm.sh/chart: nginx-12.0.4
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
annotations:
spec:
type: ClusterIP
sessionAffinity: None
ports:
- name: http
port: 80
targetPort: http
selector:
app.kubernetes.io/name: nginx
app.kubernetes.io/instance: sentry
---
# Source: sentry/charts/postgresql/templates/svc-headless.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-sentry-postgresql-headless
labels:
app.kubernetes.io/name: sentry-postgresql
helm.sh/chart: postgresql-10.16.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
# Use this annotation in addition to the actual publishNotReadyAddresses
# field below because the annotation will stop being respected soon but the
# field is broken in some versions of Kubernetes:
# https://github.com/kubernetes/kubernetes/issues/58662
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
namespace: infrastructure-prod
spec:
type: ClusterIP
clusterIP: None
# We want all pods in the StatefulSet to have their addresses published for
# the sake of the other Postgresql pods even before they're ready, since they
# have to be able to talk to each other in order to become ready.
publishNotReadyAddresses: true
ports:
- name: tcp-postgresql
port: 5432
targetPort: tcp-postgresql
selector:
app.kubernetes.io/name: sentry-postgresql
app.kubernetes.io/instance: sentry
---
# Source: sentry/charts/postgresql/templates/svc.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-sentry-postgresql
labels:
app.kubernetes.io/name: sentry-postgresql
helm.sh/chart: postgresql-10.16.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
annotations:
namespace: infrastructure-prod
spec:
type: ClusterIP
ports:
- name: tcp-postgresql
port: 5432
targetPort: tcp-postgresql
selector:
app.kubernetes.io/name: sentry-postgresql
app.kubernetes.io/instance: sentry
role: primary
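
Clients go through sentry-sentry-postgresql, which selects only the primary (role: primary); the headless variant above exists purely for pod-to-pod discovery. A minimal connectivity check against the primary:

kubectl -n infrastructure-prod exec -it sentry-sentry-postgresql-0 -- pg_isready -U postgres -d sentry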
---
# Source: sentry/charts/rabbitmq/templates/svc-headless.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-rabbitmq-headless
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: rabbitmq
helm.sh/chart: rabbitmq-8.32.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
spec:
clusterIP: None
ports:
- name: epmd
port: 4369
targetPort: epmd
- name: amqp
port: 5672
targetPort: amqp
- name: dist
port: 25672
targetPort: dist
- name: http-stats
port: 15672
targetPort: stats
selector:
app.kubernetes.io/name: rabbitmq
app.kubernetes.io/instance: sentry
publishNotReadyAddresses: true
---
# Source: sentry/charts/rabbitmq/templates/svc.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-rabbitmq
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: rabbitmq
helm.sh/chart: rabbitmq-8.32.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: null
- name: epmd
port: 4369
targetPort: epmd
nodePort: null
- name: dist
port: 25672
targetPort: dist
nodePort: null
- name: http-stats
port: 15672
targetPort: stats
nodePort: null
selector:
app.kubernetes.io/name: rabbitmq
app.kubernetes.io/instance: sentry
---
# Source: sentry/charts/redis/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-sentry-redis-headless
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: sentry-redis
helm.sh/chart: redis-16.12.1
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
annotations:
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: sentry-redis
app.kubernetes.io/instance: sentry
---
# Source: sentry/charts/redis/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-sentry-redis-master
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: sentry-redis
helm.sh/chart: redis-16.12.1
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: master
spec:
type: ClusterIP
sessionAffinity: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: sentry-redis
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: master
---
# Source: sentry/charts/redis/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-sentry-redis-replicas
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: sentry-redis
helm.sh/chart: redis-16.12.1
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: replica
spec:
type: ClusterIP
sessionAffinity: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: sentry-redis
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: replica
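
Writes belong on sentry-sentry-redis-master; sentry-sentry-redis-replicas spreads reads over the replica pods. A quick liveness check against the master:

kubectl -n infrastructure-prod exec -it sentry-sentry-redis-master-0 -- redis-cli ping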
---
# Source: sentry/charts/zookeeper/templates/svc-headless.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-zookeeper-clickhouse-headless
namespace: infrastructure-prod
labels:
app.kubernetes.io/name: zookeeper-clickhouse
helm.sh/chart: zookeeper-9.0.0
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: zookeeper
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: tcp-client
port: 2181
targetPort: client
- name: tcp-follower
port: 2888
targetPort: follower
- name: tcp-election
port: 3888
targetPort: election
selector:
app.kubernetes.io/name: zookeeper-clickhouse
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: zookeeper
---
# Source: sentry/charts/zookeeper/templates/svc.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-zookeeper-clickhouse
namespace: infrastructure-prod
labels:
app.kubernetes.io/name: zookeeper-clickhouse
helm.sh/chart: zookeeper-9.0.0
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: zookeeper
spec:
type: ClusterIP
sessionAffinity: None
ports:
- name: tcp-client
port: 2181
targetPort: client
nodePort: null
- name: tcp-follower
port: 2888
targetPort: follower
- name: tcp-election
port: 3888
targetPort: election
selector:
app.kubernetes.io/name: zookeeper-clickhouse
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: zookeeper
---
# Source: sentry/templates/service-relay.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-relay
annotations:
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
spec:
type: ClusterIP
ports:
- port: 3000
targetPort: 3000
protocol: TCP
name: sentry-relay
selector:
app: sentry
role: relay
---
# Source: sentry/templates/service-sentry.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-web
annotations:
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
spec:
type: ClusterIP
ports:
- port: 9000
targetPort: 9000
protocol: TCP
name: sentry
selector:
app: sentry
role: web
---
# Source: sentry/templates/service-snuba.yaml
apiVersion: v1
kind: Service
metadata:
name: sentry-snuba
annotations:
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
spec:
type: ClusterIP
ports:
- port: 1218
targetPort: 1218
protocol: TCP
name: sentry
selector:
app: sentry
role: snuba-api
---
# Source: sentry/charts/nginx/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sentry-nginx
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: nginx
helm.sh/chart: nginx-12.0.4
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
strategy:
rollingUpdate: {}
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: nginx
app.kubernetes.io/instance: sentry
template:
metadata:
labels:
app.kubernetes.io/name: nginx
helm.sh/chart: nginx-12.0.4
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
annotations:
spec:
automountServiceAccountToken: false
shareProcessNamespace: false
serviceAccountName: default
affinity:
podAffinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: nginx
app.kubernetes.io/instance: sentry
namespaces:
- "infrastructure-prod"
topologyKey: kubernetes.io/hostname
weight: 1
nodeAffinity:
hostNetwork: false
hostIPC: false
initContainers:
containers:
- name: nginx
image: docker.io/bitnami/nginx:1.22.0-debian-11-r3
imagePullPolicy: "IfNotPresent"
env:
- name: BITNAMI_DEBUG
value: "false"
envFrom:
ports:
- name: http
containerPort: 8080
livenessProbe:
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
tcpSocket:
port: http
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
tcpSocket:
port: http
resources:
limits: {}
requests: {}
volumeMounts:
- name: nginx-server-block
mountPath: /opt/bitnami/nginx/conf/server_blocks
volumes:
- name: nginx-server-block
configMap:
name: sentry-nginx
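
Note the container listens on 8080 (the unprivileged Bitnami default) while the sentry-nginx service maps port 80 onto the named http port, so in-cluster clients still use plain port 80. To test it locally:

kubectl -n infrastructure-prod port-forward svc/sentry-nginx 8080:80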
---
# Source: sentry/templates/deployment-sentry-cron.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sentry-cron
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
app: sentry
release: "sentry"
role: cron
replicas: 1
template:
metadata:
annotations:
checksum/configYml: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
checksum/sentryConfPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: 721120367bee8253bdefa95be2b5282a544569fa938972321d7ef43b58b9b31b
labels:
app: sentry
release: "sentry"
role: cron
spec:
affinity:
containers:
- name: sentry-cron
image: "getsentry/sentry:22.6.0"
imagePullPolicy: IfNotPresent
command: ["sentry"]
args:
- "run"
- "cron"
env:
- name: SNUBA
value: http://sentry-snuba:1218
- name: C_FORCE_ROOT
value: "true"
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: sentry-sentry-postgresql
key: postgresql-password
volumeMounts:
- mountPath: /etc/sentry
name: config
readOnly: true
- mountPath: /var/lib/sentry/files
name: sentry-data
resources:
{}
volumes:
- name: config
configMap:
name: sentry-sentry
- name: sentry-data
emptyDir: {}
---
# Source: sentry/templates/deployment-sentry-web.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sentry-web
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
spec:
revisionHistoryLimit: 10
strategy:
type: RollingUpdate
selector:
matchLabels:
app: sentry
release: "sentry"
role: web
replicas: 1
template:
metadata:
annotations:
checksum/configYml: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
checksum/sentryConfPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: e725a65a4a22b12e41b799b620f74f52b95bdf6d6d1af95c8a627de302d12c9a
labels:
app: sentry
release: "sentry"
role: web
spec:
affinity:
containers:
- name: sentry-web
image: "getsentry/sentry:22.6.0"
imagePullPolicy: IfNotPresent
command: ["sentry", "run", "web"]
ports:
- containerPort: 9000
env:
- name: SNUBA
value: http://sentry-snuba:1218
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: sentry-sentry-postgresql
key: postgresql-password
volumeMounts:
- mountPath: /etc/sentry
name: config
readOnly: true
- mountPath: /var/lib/sentry/files
name: sentry-data
livenessProbe:
failureThreshold: 5
httpGet:
path: /_health/
port: 9000
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
readinessProbe:
failureThreshold: 5
httpGet:
path: /_health/
port: 9000
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
resources:
{}
volumes:
- name: config
configMap:
name: sentry-sentry
- name: sentry-data
persistentVolumeClaim:
claimName: sentry-data
---
# Source: sentry/templates/deployment-sentry-worker.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sentry-worker
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
app: sentry
release: "sentry"
role: worker
replicas: 3
template:
metadata:
annotations:
checksum/configYml: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
checksum/sentryConfPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: d03d07d378c02472b1510daabc8ad5085fdb4c675a48a372097b3ab9450fc1b2
labels:
app: sentry
release: "sentry"
role: worker
spec:
affinity:
containers:
- name: sentry-worker
image: "getsentry/sentry:22.6.0"
imagePullPolicy: IfNotPresent
command: ["sentry"]
args:
- "run"
- "worker"
env:
- name: SNUBA
value: http://sentry-snuba:1218
- name: C_FORCE_ROOT
value: "true"
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: sentry-sentry-postgresql
key: postgresql-password
volumeMounts:
- mountPath: /etc/sentry
name: config
readOnly: true
- mountPath: /var/lib/sentry/files
name: sentry-data
resources:
{}
volumes:
- name: config
configMap:
name: sentry-sentry
- name: sentry-data
emptyDir: {}
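
The workers share the same config as web and cron but do the heavy event processing, so their replica count is the usual scaling knob. With a Helm-managed release, prefer bumping the worker replica value in values.yaml (sentry.worker.replicas, going by this chart's values layout) and upgrading; an imperative scale works but will be reverted by the next helm upgrade:

kubectl -n infrastructure-prod scale deployment sentry-worker --replicas=5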
---
# Source: sentry/templates/deployment-snuba-api.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sentry-snuba-api
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
app: sentry
release: "sentry"
role: snuba-api
replicas: 1
template:
metadata:
annotations:
checksum/snubaSettingsPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: 3451a0aac4b25b6e32f560c51b39a7064610e67eebb5d8cc6efaed7a7fa384b0
labels:
app: sentry
release: "sentry"
role: snuba-api
spec:
affinity:
containers:
- name: sentry-snuba
image: "getsentry/snuba:22.6.0"
imagePullPolicy: IfNotPresent
ports:
- containerPort: 1218
env:
- name: SNUBA_SETTINGS
value: /etc/snuba/settings.py
- name: DEFAULT_BROKERS
value: "sentry-kafka:9092"
envFrom:
- secretRef:
name: sentry-snuba-env
volumeMounts:
- mountPath: /etc/snuba
name: config
readOnly: true
livenessProbe:
failureThreshold: 5
httpGet:
path: /
port: 1218
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
readinessProbe:
failureThreshold: 10
httpGet:
path: /
port: 1218
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
resources:
{}
volumes:
- name: config
configMap:
name: sentry-snuba
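
Both probes hit GET / on port 1218, which also makes a convenient manual health check; for example, from a throwaway busybox pod inside the cluster:

kubectl -n infrastructure-prod run snuba-check --rm -it --restart=Never --image=busybox:1.31.0 -- wget -qO- http://sentry-snuba:1218/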
---
# Source: sentry/charts/clickhouse/templates/statefulset-clickhouse.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: sentry-clickhouse
labels:
app.kubernetes.io/name: clickhouse
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
spec:
replicas: 3
podManagementPolicy: Parallel
updateStrategy:
type: RollingUpdate
serviceName: sentry-clickhouse-headless
selector:
matchLabels:
app.kubernetes.io/name: clickhouse
app.kubernetes.io/instance: sentry
template:
metadata:
annotations:
checksum/config: 8bd2c93f89a6bf2126bf5fd40a1f5d3452adf072563105739c92f1f2f3381b2d
labels:
app.kubernetes.io/name: clickhouse
app.kubernetes.io/instance: sentry
spec:
initContainers:
- name: init
image: busybox:1.31.0
imagePullPolicy: IfNotPresent
args:
- /bin/sh
- -c
- |
mkdir -p /etc/clickhouse-server/metrica.d
containers:
- name: sentry-clickhouse
image: yandex/clickhouse-server:20.8.19.4
imagePullPolicy: IfNotPresent
command:
- /bin/bash
- -c
- export SHARD=${HOSTNAME##*-} && /entrypoint.sh
ports:
- name: http-port
containerPort: 8123
- name: tcp-port
containerPort: 9000
- name: inter-http-port
containerPort: 9009
livenessProbe:
tcpSocket:
port: 9000
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 3
successThreshold: 1
readinessProbe:
tcpSocket:
port: 9000
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 3
successThreshold: 1
volumeMounts:
- name: sentry-clickhouse-data
mountPath: /var/lib/clickhouse
- name: sentry-clickhouse-logs
mountPath: /var/log/clickhouse-server
- name: sentry-clickhouse-config
mountPath: /etc/clickhouse-server/config.d
- name: sentry-clickhouse-metrica
mountPath: /etc/clickhouse-server/metrica.d
- name: sentry-clickhouse-users
mountPath: /etc/clickhouse-server/users.d
volumes:
- name: sentry-clickhouse-data
persistentVolumeClaim:
claimName: sentry-clickhouse-data
- name: sentry-clickhouse-logs
emptyDir: {}
- name: sentry-clickhouse-config
configMap:
name: sentry-clickhouse-config
items:
- key: config.xml
path: config.xml
- name: sentry-clickhouse-metrica
configMap:
name: sentry-clickhouse-metrica
items:
- key: metrica.xml
path: metrica.xml
- name: sentry-clickhouse-users
configMap:
name: sentry-clickhouse-users
items:
- key: users.xml
path: users.xml
volumeClaimTemplates:
- metadata:
name: sentry-clickhouse-data
labels:
app.kubernetes.io/name: clickhouse-data
app.kubernetes.io/instance: sentry-data
app.kubernetes.io/managed-by: Helm
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "30Gi"
---
# Source: sentry/charts/kafka/charts/zookeeper/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: sentry-zookeeper
namespace: infrastructure-prod
labels:
app.kubernetes.io/name: zookeeper
helm.sh/chart: zookeeper-9.1.5
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: zookeeper
role: zookeeper
spec:
replicas: 1
podManagementPolicy: Parallel
selector:
matchLabels:
app.kubernetes.io/name: zookeeper
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: zookeeper
serviceName: sentry-zookeeper-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
annotations:
labels:
app.kubernetes.io/name: zookeeper
helm.sh/chart: zookeeper-9.1.5
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: zookeeper
spec:
serviceAccountName: default
affinity:
podAffinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: zookeeper
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: zookeeper
namespaces:
- "infrastructure-prod"
topologyKey: kubernetes.io/hostname
weight: 1
nodeAffinity:
securityContext:
fsGroup: 1001
initContainers:
containers:
- name: zookeeper
image: docker.io/bitnami/zookeeper:3.8.0-debian-10-r63
imagePullPolicy: "IfNotPresent"
securityContext:
runAsNonRoot: true
runAsUser: 1001
command:
- /scripts/setup.sh
resources:
limits: {}
requests:
cpu: 250m
memory: 256Mi
env:
- name: BITNAMI_DEBUG
value: "false"
- name: ZOO_DATA_LOG_DIR
value: ""
- name: ZOO_PORT_NUMBER
value: "2181"
- name: ZOO_TICK_TIME
value: "2000"
- name: ZOO_INIT_LIMIT
value: "10"
- name: ZOO_SYNC_LIMIT
value: "5"
- name: ZOO_PRE_ALLOC_SIZE
value: "65536"
- name: ZOO_SNAPCOUNT
value: "100000"
- name: ZOO_MAX_CLIENT_CNXNS
value: "60"
- name: ZOO_4LW_COMMANDS_WHITELIST
value: "srvr, mntr, ruok"
- name: ZOO_LISTEN_ALLIPS_ENABLED
value: "no"
- name: ZOO_AUTOPURGE_INTERVAL
value: "0"
- name: ZOO_AUTOPURGE_RETAIN_COUNT
value: "3"
- name: ZOO_MAX_SESSION_TIMEOUT
value: "40000"
- name: ZOO_SERVERS
value: sentry-zookeeper-0.sentry-zookeeper-headless.infrastructure-prod.svc.cluster.local:2888:3888::1
- name: ZOO_ENABLE_AUTH
value: "no"
- name: ZOO_HEAP_SIZE
value: "1024"
- name: ZOO_LOG_LEVEL
value: "ERROR"
- name: ALLOW_ANONYMOUS_LOGIN
value: "yes"
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
ports:
- name: client
containerPort: 2181
- name: follower
containerPort: 2888
- name: election
containerPort: 3888
livenessProbe:
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
exec:
command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 2181 | grep imok']
readinessProbe:
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
exec:
command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 2181 | grep imok']
volumeMounts:
- name: scripts
mountPath: /scripts/setup.sh
subPath: setup.sh
- name: data
mountPath: /bitnami/zookeeper
volumes:
- name: scripts
configMap:
name: sentry-zookeeper-scripts
defaultMode: 0755
volumeClaimTemplates:
- metadata:
name: data
annotations:
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "8Gi"
---
# Source: sentry/charts/kafka/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: sentry-kafka
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: kafka
helm.sh/chart: kafka-16.3.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kafka
spec:
podManagementPolicy: Parallel
replicas: 3
selector:
matchLabels:
app.kubernetes.io/name: kafka
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: kafka
serviceName: sentry-kafka-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: kafka
helm.sh/chart: kafka-16.3.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: kafka
annotations:
spec:
hostNetwork: false
hostIPC: false
affinity:
podAffinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: kafka
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: kafka
namespaces:
- "infrastructure-prod"
topologyKey: kubernetes.io/hostname
weight: 1
nodeAffinity:
securityContext:
fsGroup: 1001
serviceAccountName: sentry-kafka
containers:
- name: kafka
image: docker.io/bitnami/kafka:3.1.1-debian-10-r6
imagePullPolicy: "IfNotPresent"
securityContext:
runAsNonRoot: true
runAsUser: 1001
command:
- /scripts/setup.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: KAFKA_CFG_ZOOKEEPER_CONNECT
value: "sentry-zookeeper"
- name: KAFKA_INTER_BROKER_LISTENER_NAME
value: "INTERNAL"
- name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP
value: "INTERNAL:PLAINTEXT,CLIENT:PLAINTEXT"
- name: KAFKA_CFG_LISTENERS
value: "INTERNAL://:9093,CLIENT://:9092"
- name: KAFKA_CFG_ADVERTISED_LISTENERS
value: "INTERNAL://$(MY_POD_NAME).sentry-kafka-headless.infrastructure-prod.svc.cluster.local:9093,CLIENT://$(MY_POD_NAME).sentry-kafka-headless.infrastructure-prod.svc.cluster.local:9092"
- name: ALLOW_PLAINTEXT_LISTENER
value: "yes"
- name: KAFKA_ZOOKEEPER_PROTOCOL
value: PLAINTEXT
- name: KAFKA_VOLUME_DIR
value: "/bitnami/kafka"
- name: KAFKA_LOG_DIR
value: "/opt/bitnami/kafka/logs"
- name: KAFKA_CFG_DELETE_TOPIC_ENABLE
value: "false"
- name: KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE
value: "true"
- name: KAFKA_HEAP_OPTS
value: "-Xmx1024m -Xms1024m"
- name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MESSAGES
value: "10000"
- name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MS
value: "1000"
- name: KAFKA_CFG_LOG_RETENTION_BYTES
value: "1073741824"
- name: KAFKA_CFG_LOG_RETENTION_CHECK_INTERVALS_MS
value: "300000"
- name: KAFKA_CFG_LOG_RETENTION_HOURS
value: "168"
- name: KAFKA_CFG_MESSAGE_MAX_BYTES
value: "50000000"
- name: KAFKA_CFG_LOG_SEGMENT_BYTES
value: "1073741824"
- name: KAFKA_CFG_LOG_DIRS
value: "/bitnami/kafka/data"
- name: KAFKA_CFG_DEFAULT_REPLICATION_FACTOR
value: "3"
- name: KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR
value: "3"
- name: KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR
value: "3"
- name: KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR
value: "3"
- name: KAFKA_CFG_NUM_IO_THREADS
value: "8"
- name: KAFKA_CFG_NUM_NETWORK_THREADS
value: "3"
- name: KAFKA_CFG_NUM_PARTITIONS
value: "1"
- name: KAFKA_CFG_NUM_RECOVERY_THREADS_PER_DATA_DIR
value: "1"
- name: KAFKA_CFG_SOCKET_RECEIVE_BUFFER_BYTES
value: "102400"
- name: KAFKA_CFG_SOCKET_REQUEST_MAX_BYTES
value: "50000000"
- name: KAFKA_CFG_SOCKET_SEND_BUFFER_BYTES
value: "102400"
- name: KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS
value: "6000"
- name: KAFKA_CFG_AUTHORIZER_CLASS_NAME
value: ""
- name: KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND
value: "true"
- name: KAFKA_CFG_SUPER_USERS
value: "User:admin"
ports:
- name: kafka-client
containerPort: 9092
- name: kafka-internal
containerPort: 9093
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
tcpSocket:
port: kafka-client
readinessProbe:
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
tcpSocket:
port: kafka-client
resources:
limits: {}
requests: {}
volumeMounts:
- name: data
mountPath: /bitnami/kafka
- name: logs
mountPath: /opt/bitnami/kafka/logs
- name: scripts
mountPath: /scripts/setup.sh
subPath: setup.sh
volumes:
- name: scripts
configMap:
name: sentry-kafka-scripts
defaultMode: 0755
- name: logs
emptyDir: {}
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "8Gi"
---
# Source: sentry/charts/postgresql/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: sentry-sentry-postgresql
labels:
app.kubernetes.io/name: sentry-postgresql
helm.sh/chart: postgresql-10.16.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: primary
annotations:
namespace: infrastructure-prod
spec:
serviceName: sentry-sentry-postgresql-headless
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: sentry-postgresql
app.kubernetes.io/instance: sentry
role: primary
template:
metadata:
name: sentry-sentry-postgresql
labels:
app.kubernetes.io/name: sentry-postgresql
helm.sh/chart: postgresql-10.16.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
role: primary
app.kubernetes.io/component: primary
spec:
affinity:
podAffinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: sentry-postgresql
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: primary
namespaces:
- "infrastructure-prod"
topologyKey: kubernetes.io/hostname
weight: 1
nodeAffinity:
securityContext:
fsGroup: 1001
automountServiceAccountToken: false
containers:
- name: sentry-sentry-postgresql
image: docker.io/bitnami/postgresql:11.14.0-debian-10-r28
imagePullPolicy: "IfNotPresent"
resources:
requests:
cpu: 250m
memory: 256Mi
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "false"
- name: POSTGRESQL_PORT_NUMBER
value: "5432"
- name: POSTGRESQL_VOLUME_DIR
value: "/bitnami/postgresql"
- name: PGDATA
value: "/bitnami/postgresql/data"
- name: POSTGRES_USER
value: "postgres"
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: sentry-sentry-postgresql
key: postgresql-password
- name: POSTGRES_DB
value: "sentry"
- name: POSTGRESQL_ENABLE_LDAP
value: "no"
- name: POSTGRESQL_ENABLE_TLS
value: "no"
- name: POSTGRESQL_LOG_HOSTNAME
value: "false"
- name: POSTGRESQL_LOG_CONNECTIONS
value: "false"
- name: POSTGRESQL_LOG_DISCONNECTIONS
value: "false"
- name: POSTGRESQL_PGAUDIT_LOG_CATALOG
value: "off"
- name: POSTGRESQL_CLIENT_MIN_MESSAGES
value: "error"
- name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES
value: "pgaudit"
ports:
- name: tcp-postgresql
containerPort: 5432
livenessProbe:
exec:
command:
- /bin/sh
- -c
- exec pg_isready -U "postgres" -d "dbname=sentry" -h 127.0.0.1 -p 5432
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/sh
- -c
- -e
- |
exec pg_isready -U "postgres" -d "dbname=sentry" -h 127.0.0.1 -p 5432
[ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ]
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
volumeMounts:
- name: dshm
mountPath: /dev/shm
- name: data
mountPath: /bitnami/postgresql
subPath:
volumes:
- name: dshm
emptyDir:
medium: Memory
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "8Gi"
---
# Source: sentry/charts/rabbitmq/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: sentry-rabbitmq
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: rabbitmq
helm.sh/chart: rabbitmq-8.32.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
spec:
serviceName: sentry-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 3
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: rabbitmq
app.kubernetes.io/instance: sentry
template:
metadata:
labels:
app.kubernetes.io/name: rabbitmq
helm.sh/chart: rabbitmq-8.32.2
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
annotations:
checksum/config: bda11477cbb6a33235db045ebe8c3ead619193519420c1d0b0c55f1ef11054ff
checksum/secret: babf0fe4aa34d34b97e834e09bc2b3244531b2af5d46a971f1f80e2709a1d9b3
spec:
serviceAccountName: sentry-rabbitmq
affinity:
podAffinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: rabbitmq
app.kubernetes.io/instance: sentry
namespaces:
- "infrastructure-prod"
topologyKey: kubernetes.io/hostname
weight: 1
nodeAffinity:
securityContext:
fsGroup: 1001
runAsUser: 1001
terminationGracePeriodSeconds: 120
containers:
- name: rabbitmq
image: docker.io/bitnami/rabbitmq:3.9.16-debian-10-r0
imagePullPolicy: "IfNotPresent"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "sentry-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "yes"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: sentry-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_CLUSTER_REBALANCE
value: "true"
- name: RABBITMQ_LOAD_DEFINITIONS
value: "yes"
- name: RABBITMQ_DEFINITIONS_FILE
value: "/app/load_definition.json"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "guest"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: sentry-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: stats
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: {}
requests: {}
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
- name: load-definition-volume
mountPath: /app
readOnly: true
volumes:
- name: configuration
configMap:
name: sentry-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: load-definition-volume
secret:
secretName: "load-definition"
volumeClaimTemplates:
- metadata:
name: data
labels:
app.kubernetes.io/name: rabbitmq
app.kubernetes.io/instance: sentry
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "8Gi"
---
# Source: sentry/charts/redis/templates/master/application.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: sentry-sentry-redis-master
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: sentry-redis
helm.sh/chart: redis-16.12.1
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: sentry-redis
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: master
serviceName: sentry-sentry-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: sentry-redis
helm.sh/chart: redis-16.12.1
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: master
annotations:
checksum/configmap: 66a84499776e7002d8a81e06339571812f49df8620df929ca1c37c78ac96d465
checksum/health: f5869e6c4f61bc6462262280fbab70daa05e465ef3f84417b93d87970068f6f5
checksum/scripts: 5993695ce3dc0e9fb9930990c3d98068773da465aa5b7aa2ef5fb9be51b0d7be
checksum/secret: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
spec:
securityContext:
fsGroup: 1001
serviceAccountName: sentry-sentry-redis
affinity:
podAffinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: sentry-redis
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: master
namespaces:
- "infrastructure-prod"
topologyKey: kubernetes.io/hostname
weight: 1
nodeAffinity:
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: docker.io/bitnami/redis:6.2.7-debian-11-r3
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "yes"
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits: {}
requests: {}
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: sentry-sentry-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: sentry-sentry-redis-health
defaultMode: 0755
- name: config
configMap:
name: sentry-sentry-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: tmp
emptyDir: {}
volumeClaimTemplates:
- metadata:
name: redis-data
labels:
app.kubernetes.io/name: sentry-redis
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: master
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "8Gi"
---
# Source: sentry/charts/redis/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: sentry-sentry-redis-replicas
namespace: "infrastructure-prod"
labels:
app.kubernetes.io/name: sentry-redis
helm.sh/chart: redis-16.12.1
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: replica
spec:
replicas: 3
selector:
matchLabels:
app.kubernetes.io/name: sentry-redis
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: replica
serviceName: sentry-sentry-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: sentry-redis
helm.sh/chart: redis-16.12.1
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: replica
annotations:
checksum/configmap: 66a84499776e7002d8a81e06339571812f49df8620df929ca1c37c78ac96d465
checksum/health: f5869e6c4f61bc6462262280fbab70daa05e465ef3f84417b93d87970068f6f5
checksum/scripts: 5993695ce3dc0e9fb9930990c3d98068773da465aa5b7aa2ef5fb9be51b0d7be
checksum/secret: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
spec:
securityContext:
fsGroup: 1001
serviceAccountName: sentry-sentry-redis
affinity:
podAffinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: sentry-redis
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: replica
namespaces:
- "infrastructure-prod"
topologyKey: kubernetes.io/hostname
weight: 1
nodeAffinity:
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: docker.io/bitnami/redis:6.2.7-debian-11-r3
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: sentry-sentry-redis-master-0.sentry-sentry-redis-headless.infrastructure-prod.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "yes"
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
startupProbe:
failureThreshold: 22
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
tcpSocket:
port: redis
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits: {}
requests: {}
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: sentry-sentry-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: sentry-sentry-redis-health
defaultMode: 0755
- name: config
configMap:
name: sentry-sentry-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
volumeClaimTemplates:
- metadata:
name: redis-data
labels:
app.kubernetes.io/name: sentry-redis
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: replica
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "8Gi"
---
# Source: sentry/charts/zookeeper/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: sentry-zookeeper-clickhouse
namespace: infrastructure-prod
labels:
app.kubernetes.io/name: zookeeper-clickhouse
helm.sh/chart: zookeeper-9.0.0
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: zookeeper
role: zookeeper
spec:
replicas: 3
podManagementPolicy: Parallel
selector:
matchLabels:
app.kubernetes.io/name: zookeeper-clickhouse
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: zookeeper
serviceName: sentry-zookeeper-clickhouse-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
annotations:
labels:
app.kubernetes.io/name: zookeeper-clickhouse
helm.sh/chart: zookeeper-9.0.0
app.kubernetes.io/instance: sentry
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: zookeeper
spec:
serviceAccountName: default
affinity:
podAffinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: zookeeper-clickhouse
app.kubernetes.io/instance: sentry
app.kubernetes.io/component: zookeeper
namespaces:
- "infrastructure-prod"
topologyKey: kubernetes.io/hostname
weight: 1
nodeAffinity:
securityContext:
fsGroup: 1001
initContainers:
containers:
- name: zookeeper
image: docker.io/bitnami/zookeeper:3.8.0-debian-10-r0
imagePullPolicy: "IfNotPresent"
securityContext:
runAsNonRoot: true
runAsUser: 1001
command:
- /scripts/setup.sh
resources:
limits: {}
requests:
cpu: 250m
memory: 256Mi
env:
- name: BITNAMI_DEBUG
value: "false"
- name: ZOO_DATA_LOG_DIR
value: ""
- name: ZOO_PORT_NUMBER
value: "2181"
- name: ZOO_TICK_TIME
value: "2000"
- name: ZOO_INIT_LIMIT
value: "10"
- name: ZOO_SYNC_LIMIT
value: "5"
- name: ZOO_PRE_ALLOC_SIZE
value: "65536"
- name: ZOO_SNAPCOUNT
value: "100000"
- name: ZOO_MAX_CLIENT_CNXNS
value: "60"
- name: ZOO_4LW_COMMANDS_WHITELIST
value: "srvr, mntr, ruok"
- name: ZOO_LISTEN_ALLIPS_ENABLED
value: "no"
- name: ZOO_AUTOPURGE_INTERVAL
value: "0"
- name: ZOO_AUTOPURGE_RETAIN_COUNT
value: "3"
- name: ZOO_MAX_SESSION_TIMEOUT
value: "40000"
- name: ZOO_SERVERS
value: sentry-zookeeper-clickhouse-0.sentry-zookeeper-clickhouse-headless.infrastructure-prod.svc.cluster.local:2888:3888::1 sentry-zookeeper-clickhouse-1.sentry-zookeeper-clickhouse-headless.infrastructure-prod.svc.cluster.local:2888:3888::2 sentry-zookeeper-clickhouse-2.sentry-zookeeper-clickhouse-headless.infrastructure-prod.svc.cluster.local:2888:3888::3
- name: ZOO_ENABLE_AUTH
value: "no"
- name: ZOO_HEAP_SIZE
value: "1024"
- name: ZOO_LOG_LEVEL
value: "ERROR"
- name: ALLOW_ANONYMOUS_LOGIN
value: "yes"
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
ports:
- name: client
containerPort: 2181
- name: follower
containerPort: 2888
- name: election
containerPort: 3888
livenessProbe:
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
exec:
command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 2181 | grep imok']
readinessProbe:
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
exec:
command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 2181 | grep imok']
volumeMounts:
- name: scripts
mountPath: /scripts/setup.sh
subPath: setup.sh
- name: data
mountPath: /bitnami/zookeeper
volumes:
- name: scripts
configMap:
name: sentry-zookeeper-clickhouse-scripts
defaultMode: 0755
volumeClaimTemplates:
- metadata:
name: data
annotations:
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "8Gi"
---
# Source: sentry/templates/cronjob-sentry-cleanup.yaml
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: sentry-sentry-cleanup
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
spec:
schedule: "0 0 * * *"
successfulJobsHistoryLimit: 5
failedJobsHistoryLimit: 5
concurrencyPolicy: "Allow"
jobTemplate:
spec:
activeDeadlineSeconds: 100
template:
metadata:
annotations:
checksum/configYml: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
checksum/sentryConfPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: 9494f8f4a42a81156dc5e1fcf076ae729ba6cf93e8c3fe09963b0de9a722f3b9
labels:
app: sentry
release: "sentry"
spec:
affinity:
containers:
- name: sentry-sentry-cleanup
image: "getsentry/sentry:22.6.0"
imagePullPolicy: IfNotPresent
command: ["sentry"]
args:
- "cleanup"
- "--days"
- "90"
env:
- name: SNUBA
value: http://sentry-snuba:1218
- name: C_FORCE_ROOT
value: "true"
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: sentry-sentry-postgresql
key: postgresql-password
volumeMounts:
- mountPath: /etc/sentry
name: config
readOnly: true
- mountPath: /var/lib/sentry/files
name: sentry-data
resources:
null
restartPolicy: Never
volumes:
- name: config
configMap:
name: sentry-sentry
- name: sentry-data
emptyDir: {}
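
This job prunes data older than 90 days every midnight. To run it on demand instead of waiting for the schedule, create a one-off Job from the CronJob (the job name here is arbitrary):

kubectl -n infrastructure-prod create job --from=cronjob/sentry-sentry-cleanup sentry-cleanup-manual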
---
# Source: sentry/templates/cronjob-snuba-cleanup-errors.yaml
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: sentry-snuba-cleanup-errors
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
spec:
schedule: "0 * * * *"
successfulJobsHistoryLimit: 5
failedJobsHistoryLimit: 5
concurrencyPolicy: "Allow"
jobTemplate:
spec:
activeDeadlineSeconds: 100
template:
metadata:
annotations:
checksum/snubaSettingsPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: 3451a0aac4b25b6e32f560c51b39a7064610e67eebb5d8cc6efaed7a7fa384b0
labels:
app: sentry
release: "sentry"
spec:
affinity:
containers:
- name: sentry-snuba-cleanup-errors
image: "getsentry/snuba:22.6.0"
imagePullPolicy: IfNotPresent
command:
- "snuba"
- "cleanup"
- "--storage"
- "errors"
- "--dry-run"
- "False"
- "--clickhouse-host"
- "sentry-clickhouse"
- "--clickhouse-port"
- "9000"
env:
- name: SNUBA_SETTINGS
value: /etc/snuba/settings.py
envFrom:
- secretRef:
name: sentry-snuba-env
volumeMounts:
- mountPath: /etc/snuba
name: config
readOnly: true
resources:
null
restartPolicy: Never
volumes:
- name: config
configMap:
name: sentry-snuba
---
# Source: sentry/templates/cronjob-snuba-cleanup-transactions.yaml
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: sentry-snuba-cleanup-transactions
labels:
app: sentry
chart: "sentry-15.0.8"
release: "sentry"
heritage: "Helm"
spec:
schedule: "0 * * * *"
successfulJobsHistoryLimit: 5
failedJobsHistoryLimit: 5
concurrencyPolicy: "Allow"
jobTemplate:
spec:
activeDeadlineSeconds: 100
template:
metadata:
annotations:
checksum/snubaSettingsPy: d5f85a6a8afbc55eebe23801e1a51a0fb4c0428c9a73ef6708d8dc83e079cd49
checksum/config.yaml: 3451a0aac4b25b6e32f560c51b39a7064610e67eebb5d8cc6efaed7a7fa384b0
labels:
app: sentry
release: "sentry"
spec:
affinity:
containers:
- name: sentry-snuba-cleanup-transactions
image: "getsentry/snuba:22.6.0"
imagePullPolicy: IfNotPresent
command:
- "snuba"
- "cleanup"
- "--storage"
- "transactions"
- "--dry-run"
- "False"
- "--clickhouse-host"
- "sentry-clickhouse"
- "--clickhouse-port"
- "9000"
env:
- name: SNUBA_SETTINGS
value: /etc/snuba/settings.py
envFrom:
- secretRef:
name: sentry-snuba-env
volumeMounts:
- mountPath: /etc/snuba
name: config
readOnly: true
resources:
null
restartPolicy: Never
volumes:
- name: config
configMap:
name: sentry-snuba
NOTES:
* When running upgrades, make sure to supply the existing `system.secretKey` value again; you can read it back from the running release with:
kubectl -n infrastructure-prod get configmap sentry-sentry -o json | grep -m1 -Po '(?<=system.secret-key: )[^\\]*'
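
For example, a hypothetical upgrade that carries the key over explicitly could look like:

SENTRY_SECRET_KEY=$(kubectl -n infrastructure-prod get configmap sentry-sentry -o json | grep -m1 -Po '(?<=system.secret-key: )[^\\]*')
helm upgrade sentry sentry/sentry -f values.yaml --set system.secretKey="$SENTRY_SECRET_KEY"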