Showing 1 changed file with 10 additions and 10 deletions
@@ -28,9 +28,9 @@ server:
 # Zookeeper connection parameters. Used for service discovery.
 zk:
   # Enable/disable zookeeper discovery service.
-  enabled: "${ZOOKEEPER_ENABLED:true}"
+  enabled: "${ZOOKEEPER_ENABLED:false}"
   # Zookeeper connect string
-  url: "${ZOOKEEPER_URL:123.60.37.18:22181,124.70.179.189:22181,124.71.191.130:22181}"
+  url: "${ZOOKEEPER_URL:localhost:2181}"
   # Zookeeper retry interval in milliseconds
   retry_interval_ms: "${ZOOKEEPER_RETRY_INTERVAL_MS:3000}"
   # Zookeeper connection timeout in milliseconds
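Note: the Zookeeper defaults now disable discovery and point at localhost, so deployments must supply real connection details externally. A minimal sketch of re-enabling discovery through the environment variables referenced above, assuming a docker-compose deployment (the service name and hosts are placeholders, not taken from this change):

  services:
    tb-node:  # hypothetical service name
      environment:
        ZOOKEEPER_ENABLED: "true"
        ZOOKEEPER_URL: "zk-1:2181,zk-2:2181,zk-3:2181"  # placeholder hosts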
@@ -61,14 +61,14 @@ redis:
     usePoolConfig: "${REDIS_CLIENT_USE_POOL_CONFIG:false}"
   cluster:
     # Comma-separated list of "host:port" pairs to bootstrap from.
-    nodes: "${REDIS_NODES:123.60.37.18:6379,124.70.179.189:6379,124.71.191.130:6379}"
+    nodes: "${REDIS_NODES:}"
     # Maximum number of redirects to follow when executing commands across the cluster.
     max-redirects: "${REDIS_MAX_REDIRECTS:12}"
     useDefaultPoolConfig: "${REDIS_USE_DEFAULT_POOL_CONFIG:true}"
   # db index
-  db: "${REDIS_DB:15}"
+  db: "${REDIS_DB:0}"
   # db password
-  password: "${REDIS_PASSWORD:redis@6379}"
+  password: "${REDIS_PASSWORD:}"
   # pool config
   pool_config:
     maxTotal: "${REDIS_POOL_CONFIG_MAX_TOTAL:128}"
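Note: the Redis defaults likewise drop the hardcoded cluster nodes, database index 15, and password; REDIS_NODES and REDIS_PASSWORD now default to empty and REDIS_DB to 0. A sketch of supplying real values, under the same assumed docker-compose layout (addresses and password are placeholders):

  services:
    tb-node:  # hypothetical service name
      environment:
        REDIS_NODES: "redis-1:6379,redis-2:6379,redis-3:6379"  # placeholder addresses
        REDIS_DB: "0"
        REDIS_PASSWORD: "change-me"  # placeholder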
@@ -190,7 +190,7 @@ transport:
 queue:
   type: "${TB_QUEUE_TYPE:kafka}" # kafka (Apache Kafka) or aws-sqs (AWS SQS) or pubsub (PubSub) or service-bus (Azure Service Bus) or rabbitmq (RabbitMQ)
   kafka:
-    bootstrap.servers: "${TB_KAFKA_SERVERS:139.198.106.153:9092}"
+    bootstrap.servers: "${TB_KAFKA_SERVERS:localhost:9092}"
     acks: "${TB_KAFKA_ACKS:all}"
     retries: "${TB_KAFKA_RETRIES:1}"
     batch.size: "${TB_KAFKA_BATCH_SIZE:16384}"
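Note: the Kafka bootstrap default moves from a public IP to localhost:9092; clustered deployments override it via TB_KAFKA_SERVERS. A sketch, same assumed layout (broker names are placeholders):

  services:
    tb-node:  # hypothetical service name
      environment:
        TB_KAFKA_SERVERS: "kafka-1:9092,kafka-2:9092,kafka-3:9092"  # placeholder brokers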
@@ -201,10 +201,10 @@ queue:
     replication_factor: "${TB_QUEUE_KAFKA_REPLICATION_FACTOR:1}"
     use_confluent_cloud: "${TB_QUEUE_KAFKA_USE_CONFLUENT_CLOUD:false}"
     confluent:
-      ssl.algorithm: "${TB_QUEUE_KAFKA_CONFLUENT_SSL_ALGORITHM:http}"
-      sasl.mechanism: "${TB_QUEUE_KAFKA_CONFLUENT_SASL_MECHANISM:SCRAM-SHA-512}"
-      sasl.config: "${TB_QUEUE_KAFKA_CONFLUENT_SASL_JAAS_CONFIG:org.apache.kafka.common.security.scram.ScramLoginModule required username=\"thingskit\" password=\"thingskit\";}"
-      security.protocol: "${TB_QUEUE_KAFKA_CONFLUENT_SECURITY_PROTOCOL:SASL_PLAINTEXT}"
+      ssl.algorithm: "${TB_QUEUE_KAFKA_CONFLUENT_SSL_ALGORITHM:https}"
+      sasl.mechanism: "${TB_QUEUE_KAFKA_CONFLUENT_SASL_MECHANISM:PLAIN}"
+      sasl.config: "${TB_QUEUE_KAFKA_CONFLUENT_SASL_JAAS_CONFIG:org.apache.kafka.common.security.plain.PlainLoginModule required username=\"CLUSTER_API_KEY\" password=\"CLUSTER_API_SECRET\";}"
+      security.protocol: "${TB_QUEUE_KAFKA_CONFLUENT_SECURITY_PROTOCOL:SASL_SSL}"
     other: # In this section you can specify custom parameters for Kafka consumer/producer and expose the env variables to configure outside
       - key: "request.timeout.ms" # refer to https://docs.confluent.io/platform/current/installation/configuration/producer-configs.html#producerconfigs_request.timeout.ms
         value: "${TB_QUEUE_KAFKA_REQUEST_TIMEOUT_MS:30000}" # (30 seconds)
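Note: the confluent block reverts to stock Confluent Cloud defaults: PLAIN SASL over SASL_SSL with the CLUSTER_API_KEY/CLUSTER_API_SECRET placeholders, replacing the SCRAM-SHA-512/SASL_PLAINTEXT setup that embedded thingskit credentials. A sketch of wiring in real Confluent Cloud credentials through the env vars above, same assumed layout (key and secret are placeholders):

  services:
    tb-node:  # hypothetical service name
      environment:
        TB_QUEUE_KAFKA_USE_CONFLUENT_CLOUD: "true"
        # <api-key>/<api-secret> are placeholders for real cluster credentials.
        TB_QUEUE_KAFKA_CONFLUENT_SASL_JAAS_CONFIG: 'org.apache.kafka.common.security.plain.PlainLoginModule required username="<api-key>" password="<api-secret>";'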