Commit 2d747765219800e7b4d9ba154713b2f92b1d3619

Authored by 芯火源
1 parent d1926c99

fix(DEFECT-1963): temp

... ... @@ -99,9 +99,9 @@ app:
99 99 # Zookeeper connection parameters. Used for service discovery.
100 100 zk:
101 101 # Enable/disable zookeeper discovery service.
102   - enabled: "${ZOOKEEPER_ENABLED:false}"
  102 + enabled: "${ZOOKEEPER_ENABLED:true}"
103 103 # Zookeeper connect string
104   - url: "${ZOOKEEPER_URL:localhost:2181}"
  104 + url: "${ZOOKEEPER_URL:123.60.37.18:22181,124.70.179.189:22181,124.71.191.130:22181}"
105 105 # Zookeeper retry interval in milliseconds
106 106 retry_interval_ms: "${ZOOKEEPER_RETRY_INTERVAL_MS:3000}"
107 107 # Zookeeper connection timeout in milliseconds
... ... @@ -109,7 +109,7 @@ zk:
109 109 # Zookeeper session timeout in milliseconds
110 110 session_timeout_ms: "${ZOOKEEPER_SESSION_TIMEOUT_MS:3000}"
111 111 # Name of the directory in zookeeper 'filesystem'
112   - zk_dir: "${ZOOKEEPER_NODES_DIR:/thingsboard}"
  112 + zk_dir: "${ZOOKEEPER_NODES_DIR:/thingsKit}"
113 113
114 114 cluster:
115 115 stats:
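Every value in this hunk follows the ${ENV_VAR:default} pattern, so the same Zookeeper settings can also be supplied at deploy time through environment variables instead of editing the file defaults. A minimal sketch, assuming a docker-compose deployment (the service name tb-node and the placeholder hosts are assumptions, not part of this commit):

    # Hypothetical docker-compose fragment overriding the same keys via environment variables.
    services:
      tb-node:
        environment:
          ZOOKEEPER_ENABLED: "true"
          ZOOKEEPER_URL: "zk-1:2181,zk-2:2181,zk-3:2181"   # placeholder connect string
          ZOOKEEPER_NODES_DIR: "/thingsKit"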
... ... @@ -377,7 +377,7 @@ actors:
377 377
378 378 cache:
379 379 # caffeine or redis
380   - type: "${CACHE_TYPE:caffeine}"
  380 + type: "${CACHE_TYPE:redis}"
381 381 maximumPoolSize: "${CACHE_MAXIMUM_POOL_SIZE:16}" # max pool size to process futures that call the external cache
382 382 attributes:
383 383 # make sure that if cache.type is 'redis' and cache.attributes.enabled is 'true', you change the 'maxmemory-policy' Redis config property to 'allkeys-lru', 'allkeys-lfu' or 'allkeys-random'
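Per the note above, running with CACHE_TYPE set to redis while the attributes cache is enabled requires an eviction policy of allkeys-lru, allkeys-lfu or allkeys-random on the Redis side; maxmemory-policy is a standard Redis setting. A minimal sketch of one way to apply it, assuming the Redis nodes are started via docker-compose (the compose layout, image tag and memory limit are assumptions):

    # Hypothetical docker-compose fragment for one Redis node.
    services:
      redis-node-1:
        image: redis:6.2
        command: ["redis-server", "--maxmemory", "512mb", "--maxmemory-policy", "allkeys-lru"]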
... ... @@ -460,7 +460,7 @@ caffeine:
460 460 redis:
461 461 # standalone or cluster
462 462 connection:
463   - type: "${REDIS_CONNECTION_TYPE:standalone}"
  463 + type: "${REDIS_CONNECTION_TYPE:cluster}"
464 464 standalone:
465 465 host: "${REDIS_HOST:localhost}"
466 466 port: "${REDIS_PORT:6379}"
... ... @@ -475,14 +475,14 @@ redis:
475 475 usePoolConfig: "${REDIS_CLIENT_USE_POOL_CONFIG:false}"
476 476 cluster:
477 477 # Comma-separated list of "host:port" pairs to bootstrap from.
478   - nodes: "${REDIS_NODES:}"
  478 + nodes: "${REDIS_NODES:123.60.37.18:6379,124.70.179.189:6379,124.71.191.130:6379}"
479 479 # Maximum number of redirects to follow when executing commands across the cluster.
480 480 max-redirects: "${REDIS_MAX_REDIRECTS:12}"
481 481 useDefaultPoolConfig: "${REDIS_USE_DEFAULT_POOL_CONFIG:true}"
482 482 # db index
483 483 db: "${REDIS_DB:0}"
484 484 # db password
485   - password: "${REDIS_PASSWORD:redis@6379}"
  485 + password: "${REDIS_PASSWORD:thingskit}"
486 486 # pool config
487 487 pool_config:
488 488 maxTotal: "${REDIS_POOL_CONFIG_MAX_TOTAL:128}"
... ... @@ -553,9 +553,9 @@ spring:
553 553 database-platform: "${SPRING_JPA_DATABASE_PLATFORM:org.hibernate.dialect.PostgreSQLDialect}"
554 554 datasource:
555 555 driverClassName: "${SPRING_DRIVER_CLASS_NAME:org.postgresql.Driver}"
556   - url: "${SPRING_DATASOURCE_URL:jdbc:postgresql://localhost:5432/postgres}"
  556 + url: "${SPRING_DATASOURCE_URL:jdbc:postgresql://222.180.200.114:20638/test}"
557 557 username: "${SPRING_DATASOURCE_USERNAME:postgres}"
558   - password: "${SPRING_DATASOURCE_PASSWORD:postgres}"
  558 + password: "${SPRING_DATASOURCE_PASSWORD:Thw770!!uriq}"
559 559 hikari:
560 560 maximumPoolSize: "${SPRING_DATASOURCE_MAXIMUM_POOL_SIZE:16}"
561 561 # Audit log parameters
... ... @@ -603,8 +603,8 @@ audit-log:
603 603
604 604 state:
605 605 # Should be greater than transport.sessions.report_timeout
606   - defaultInactivityTimeoutInSec: "${DEFAULT_INACTIVITY_TIMEOUT:600}"
607   - defaultStateCheckIntervalInSec: "${DEFAULT_STATE_CHECK_INTERVAL:60}"
  606 + defaultInactivityTimeoutInSec: "${DEFAULT_INACTIVITY_TIMEOUT:60}"
  607 + defaultStateCheckIntervalInSec: "${DEFAULT_STATE_CHECK_INTERVAL:6}"
608 608 persistToTelemetry: "${PERSIST_STATE_TO_TELEMETRY:false}"
609 609
610 610 js:
... ... @@ -667,7 +667,7 @@ transport:
667 667 # Local MQTT transport parameters
668 668 mqtt:
669 669 # Enable/disable mqtt transport protocol.
670   - enabled: "${MQTT_ENABLED:true}"
  670 + enabled: "${MQTT_ENABLED:false}"
671 671 bind_address: "${MQTT_BIND_ADDRESS:0.0.0.0}"
672 672 bind_port: "${MQTT_BIND_PORT:1883}"
673 673 # Enable proxy protocol support. Disabled by default. If enabled, supports both v1 and v2.
... ... @@ -726,29 +726,29 @@ transport:
726 726 # To listen on multiple network interfaces, separate the IPs with commas, e.g.: 192.168.1.4,10.0.0.4
727 727 # If unsure, use 0.0.0.0, which works in most cases
728 728 # Do not use 127.0.0.1; domain names, including localhost, will not work.
729   - ip: ${GBT28181_SIP_IP:127.0.0.1}
  729 + ip: ${GBT28181_SIP_IP:192.168.1.22}
730 730 # [Optional] Port the GB28181 service listens on
731 731 port: ${GBT28181_SIP_PORT:5060}
732 732 # [Optional]
733   - id: ${GBT28181_SIP_ID:51010700599000000001}
  733 + id: ${GBT28181_SIP_ID:44010200492000000001}
734 734 # Per section 6.1.2 of the national standard, domain should use the first ten digits of the unified ID encoding. Annex D of the standard defines the first 8 digits as the center code (province, city, district and grassroots codes; see GB/T 2260-2007)
735 735 # The last two digits are the industry code; see Annex D.3 for the definition
736 736 # 3701020049 identifies information-industry access in Lixia District, Jinan, Shandong
737 737 # [Optional]
738   - domain: ${GBT28181_SIP_DOMAIN:5101070059}
  738 + domain: ${GBT28181_SIP_DOMAIN:4401020049}
739 739 # [Optional]
740 740 password: ${GBT28181_SIP_PASSWORD:61332286}
741 741 # Default ZLM server configuration
742 742 media:
743   - id: ${GBT28181_MEDIA_GENERAL_ID:D2okJWKKaQ5bX7Va}
  743 + id: ${GBT28181_MEDIA_GENERAL_ID:f6GfbO0BGEaROKLP}
744 744 # [Must change] Internal (LAN) IP of the ZLM server
745   - ip: ${GBT28181_MEDIA_IP:127.0.0.1}
  745 + ip: ${GBT28181_MEDIA_IP:192.168.1.35}
746 746 # [Must change] http.port of the ZLM server
747 747 http-port: ${GBT28181_MEDIA_HTTP_PORT:28080}
748 748 hook-ip: ${GBT28181_MEDIA_HOOK_IP:}
749 749 stream-ip: ${GBT28181_MEDIA_STREAM_IP:}
750 750 # [Optional] The ZLM server's hook.admin_params=secret
751   - secret: ${GBT28181_MEDIA_API_SECRET:QhrTN7k6HcDnt0YyeolwHwiVYDgIHPMZ}
  751 + secret: ${GBT28181_MEDIA_API_SECRET:5PnbPDCcxQGeK15OowHPPdSgort2Cx9Y}
752 752 # Enable multi-port mode. Multi-port mode uses a separate port for each stream and is more compatible; single-port mode distinguishes streams by SSRC. If on-demand playback times out, multi-port mode is recommended for testing.
753 753 rtp:
754 754 # [Optional] Whether to enable multi-port mode; when enabled, ports within portRange are used for media stream transport
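As a quick check of the encoding rule in the comments above, the defaults in this hunk are consistent: domain 4401020049 is the first ten digits of id 44010200492000000001.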
... ... @@ -1012,13 +1012,13 @@ swagger:
1012 1012 version: "${SWAGGER_VERSION:}"
1013 1013
1014 1014 queue:
1015   - type: "${TB_QUEUE_TYPE:in-memory}" # in-memory or kafka (Apache Kafka) or aws-sqs (AWS SQS) or pubsub (PubSub) or service-bus (Azure Service Bus) or rabbitmq (RabbitMQ)
  1015 + type: "${TB_QUEUE_TYPE:kafka}" # in-memory or kafka (Apache Kafka) or aws-sqs (AWS SQS) or pubsub (PubSub) or service-bus (Azure Service Bus) or rabbitmq (RabbitMQ)
1016 1016 in_memory:
1017 1017 stats:
1018 1018 # For debug level
1019 1019 print-interval-ms: "${TB_QUEUE_IN_MEMORY_STATS_PRINT_INTERVAL_MS:60000}"
1020 1020 kafka:
1021   - bootstrap.servers: "${TB_KAFKA_SERVERS:localhost:9092}"
  1021 + bootstrap.servers: "${TB_KAFKA_SERVERS:123.60.37.18:29092,124.70.179.189:29092,124.71.191.130:29092}"
1022 1022 acks: "${TB_KAFKA_ACKS:all}"
1023 1023 retries: "${TB_KAFKA_RETRIES:1}"
1024 1024 compression.type: "${TB_KAFKA_COMPRESSION_TYPE:none}" # none or gzip
... ... @@ -1032,11 +1032,11 @@ queue:
1032 1032 max_poll_records: "${TB_QUEUE_KAFKA_MAX_POLL_RECORDS:8192}"
1033 1033 max_partition_fetch_bytes: "${TB_QUEUE_KAFKA_MAX_PARTITION_FETCH_BYTES:16777216}"
1034 1034 fetch_max_bytes: "${TB_QUEUE_KAFKA_FETCH_MAX_BYTES:134217728}"
1035   - use_confluent_cloud: "${TB_QUEUE_KAFKA_USE_CONFLUENT_CLOUD:false}"
  1035 + use_confluent_cloud: "${TB_QUEUE_KAFKA_USE_CONFLUENT_CLOUD:true}"
1036 1036 confluent:
1037   - ssl.algorithm: "${TB_QUEUE_KAFKA_CONFLUENT_SSL_ALGORITHM:https}"
1038   - sasl.mechanism: "${TB_QUEUE_KAFKA_CONFLUENT_SASL_MECHANISM:PLAIN}"
1039   - sasl.config: "${TB_QUEUE_KAFKA_CONFLUENT_SASL_JAAS_CONFIG:org.apache.kafka.common.security.plain.PlainLoginModule required username=\"CLUSTER_API_KEY\" password=\"CLUSTER_API_SECRET\";}"
  1037 + ssl.algorithm: "${TB_QUEUE_KAFKA_CONFLUENT_SSL_ALGORITHM:http}"
  1038 + sasl.mechanism: "${TB_QUEUE_KAFKA_CONFLUENT_SASL_MECHANISM:SCRAM-SHA-512}"
  1039 + sasl.config: "${TB_QUEUE_KAFKA_CONFLUENT_SASL_JAAS_CONFIG:org.apache.kafka.common.security.scram.ScramLoginModule required username=\"thingskit\" password=\"thingskit\";}"
1040 1040 security.protocol: "${TB_QUEUE_KAFKA_CONFLUENT_SECURITY_PROTOCOL:SASL_SSL}"
1041 1041 # Key-value properties for the Kafka consumer per specific topic, e.g. tb_ota_package is the topic name for OTA, tb_rule_engine.sq is the topic name for the default SequentialByOriginator queue.
1042 1042 # Check TB_QUEUE_CORE_OTA_TOPIC and TB_QUEUE_RE_SQ_TOPIC params
... ...