Commit 5f40dcacdbd8c5524db2031a65d775bd3bf67de0

Author: Igor Kulikov
Parent: 62db6001

Fix docker env files. Improve logging.

... ... @@ -136,6 +136,7 @@ public class RemoteJsInvokeService extends AbstractJsInvokeService {
136 136 .setCompileRequest(jsRequest)
137 137 .build();
138 138
  139 + log.trace("Post compile request for scriptId [{}]", scriptId);
139 140 ListenableFuture<JsInvokeProtos.RemoteJsResponse> future = kafkaTemplate.post(scriptId.toString(), jsRequestWrapper);
140 141 return Futures.transform(future, response -> {
141 142 JsInvokeProtos.JsCompileResponse compilationResult = response.getCompileResponse();
... ...
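Note: kafkaTemplate.post(scriptId.toString(), jsRequestWrapper) uses the script id as the Kafka record key, so with Kafka's default partitioner every request for a given script hashes to the same partition, and hence effectively to the same JS executor instance. The new trace line tags each outgoing compile request with its scriptId, so it can be correlated with the response-side traces added in TbKafkaRequestTemplate below.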
... ... @@ -92,6 +92,9 @@ public class TbKafkaRequestTemplate<Request, Response> extends AbstractTbKafkaTe
92 92 long nextCleanupMs = 0L;
93 93 while (!stopped) {
94 94 ConsumerRecords<String, byte[]> responses = responseTemplate.poll(Duration.ofMillis(pollInterval));
  95 + if (responses.count() > 0) {
  96 + log.trace("Polling responses completed, consumer records count [{}]", responses.count());
  97 + }
95 98 responses.forEach(response -> {
96 99 Header requestIdHeader = response.headers().lastHeader(TbKafkaSettings.REQUEST_ID_HEADER);
97 100 Response decodedResponse = null;
... ... @@ -109,6 +112,7 @@ public class TbKafkaRequestTemplate<Request, Response> extends AbstractTbKafkaTe
109 112 if (requestId == null) {
110 113 log.error("[{}] Missing requestId in header and body", response);
111 114 } else {
  115 + log.trace("[{}] Response received", requestId);
112 116 ResponseMetaData<Response> expectedResponse = pendingRequests.remove(requestId);
113 117 if (expectedResponse == null) {
114 118 log.trace("[{}] Invalid or stale request", requestId);
... ... @@ -132,6 +136,7 @@ public class TbKafkaRequestTemplate<Request, Response> extends AbstractTbKafkaTe
132 136 if (kv.getValue().expTime < tickTs) {
133 137 ResponseMetaData<Response> staleRequest = pendingRequests.remove(kv.getKey());
134 138 if (staleRequest != null) {
  139 + log.trace("[{}] Request timeout detected, expTime [{}], tickTs [{}]", kv.getKey(), staleRequest.expTime, tickTs);
135 140 staleRequest.future.setException(new TimeoutException());
136 141 }
137 142 }
... ... @@ -158,8 +163,10 @@ public class TbKafkaRequestTemplate<Request, Response> extends AbstractTbKafkaTe
158 163 headers.add(new RecordHeader(TbKafkaSettings.REQUEST_ID_HEADER, uuidToBytes(requestId)));
159 164 headers.add(new RecordHeader(TbKafkaSettings.RESPONSE_TOPIC_HEADER, stringToBytes(responseTemplate.getTopic())));
160 165 SettableFuture<Response> future = SettableFuture.create();
161   - pendingRequests.putIfAbsent(requestId, new ResponseMetaData<>(tickTs + maxRequestTimeout, future));
  166 + ResponseMetaData<Response> responseMetaData = new ResponseMetaData<>(tickTs + maxRequestTimeout, future);
  167 + pendingRequests.putIfAbsent(requestId, responseMetaData);
162 168 request = requestTemplate.enrich(request, responseTemplate.getTopic(), requestId);
  169 + log.trace("[{}] Sending request, key [{}], expTime [{}]", requestId, key, responseMetaData.expTime);
163 170 requestTemplate.send(key, request, headers, null);
164 171 return future;
165 172 }
... ...
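The three trace points added above (send, receive, timeout) instrument a single mechanism: request/response over Kafka backed by a map of pending requests. Each send registers a SettableFuture under a fresh requestId with an absolute deadline (tickTs + maxRequestTimeout); the poll loop removes and completes the entry when the matching response arrives; and a periodic sweep fails expired entries with a TimeoutException. The responseMetaData local introduced in the last hunk exists only so the new trace line can log the computed expTime. A minimal sketch of the pattern, assuming Guava's SettableFuture; the class and method names here are illustrative, not ThingsBoard's actual types:

import com.google.common.util.concurrent.SettableFuture;

import java.util.Iterator;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeoutException;

class PendingRequestRegistry<Response> {

    private static final class Entry<R> {
        final long expTime;              // absolute deadline, ms since epoch
        final SettableFuture<R> future;

        Entry(long expTime, SettableFuture<R> future) {
            this.expTime = expTime;
            this.future = future;
        }
    }

    private final Map<UUID, Entry<Response>> pending = new ConcurrentHashMap<>();

    // On send: register the future before handing the record to the producer,
    // so a fast response can never arrive ahead of its registration.
    SettableFuture<Response> register(UUID requestId, long nowMs, long timeoutMs) {
        SettableFuture<Response> future = SettableFuture.create();
        pending.putIfAbsent(requestId, new Entry<>(nowMs + timeoutMs, future));
        return future;
    }

    // From the poll loop: complete the waiting future, if it is still pending.
    void complete(UUID requestId, Response response) {
        Entry<Response> entry = pending.remove(requestId);
        if (entry != null) {
            entry.future.set(response);
        } // else: stale or duplicate response; drop it, as the template does
    }

    // Periodic sweep (the nextCleanupMs tick): fail everything past its deadline.
    void expire(long nowMs) {
        Iterator<Map.Entry<UUID, Entry<Response>>> it = pending.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<UUID, Entry<Response>> kv = it.next();
            if (kv.getValue().expTime < nowMs) {
                it.remove();
                kv.getValue().future.setException(new TimeoutException());
            }
        }
    }
}

With expTime logged both when the request is sent and when it times out, a later "[id] Invalid or stale request" trace can be tied back to the exact deadline the sender registered.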
... ... @@ -16,5 +16,3 @@ TB_VERSION=latest
16 16 DATABASE=postgres
17 17
18 18 LOAD_BALANCER_NAME=haproxy-certbot
19   -
20   -KAFKA_TOPICS="js.eval.requests:100:1:delete --config=retention.ms=60000 --config=segment.bytes=26214400 --config=retention.bytes=104857600,tb.transport.api.requests:30:1:delete --config=retention.ms=60000 --config=segment.bytes=26214400 --config=retention.bytes=104857600,tb.rule-engine:30:1"
... ...
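The KAFKA_TOPICS variable can be dropped here because its only consumer was the ${KAFKA_TOPICS} reference in kafka.env (next hunk), and, assuming kafka.env is passed to the container via Compose's env_file, that reference never worked as intended: Docker Compose does not perform variable substitution inside env_file files, so the container saw the literal string ${KAFKA_TOPICS} instead of the topic list. Inlining the value, as the next hunk does, is the "fix docker env files" half of this commit.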
... ... @@ -4,7 +4,7 @@ KAFKA_LISTENERS=INSIDE://:9093,OUTSIDE://:9092
4 4 KAFKA_ADVERTISED_LISTENERS=INSIDE://:9093,OUTSIDE://kafka:9092
5 5 KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
6 6 KAFKA_INTER_BROKER_LISTENER_NAME=INSIDE
7   -KAFKA_CREATE_TOPICS=${KAFKA_TOPICS}
  7 +KAFKA_CREATE_TOPICS=js.eval.requests:100:1:delete --config=retention.ms=60000 --config=segment.bytes=26214400 --config=retention.bytes=104857600,tb.transport.api.requests:30:1:delete --config=retention.ms=60000 --config=segment.bytes=26214400 --config=retention.bytes=104857600,tb.rule-engine:30:1
8 8 KAFKA_AUTO_CREATE_TOPICS_ENABLE=false
9 9 KAFKA_LOG_RETENTION_BYTES=1073741824
10 10 KAFKA_LOG_SEGMENT_BYTES=268435456
... ...
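Assuming the wurstmeister/kafka image, whose create-topics.sh script consumes KAFKA_CREATE_TOPICS, each comma-separated entry has the form name:partitions:replicas:cleanup.policy, and the fourth field is appended to the kafka-topics.sh invocation as --config=cleanup.policy=<value>. That is what lets the extra flags ride along after "delete": js.eval.requests and tb.transport.api.requests are created with a 60 s retention window (retention.ms=60000), 25 MiB segments (segment.bytes=26214400), and a 100 MiB per-partition size cap (retention.bytes=104857600), while tb.rule-engine:30:1 keeps the broker defaults.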
... ... @@ -87,10 +87,17 @@ public class ThingsBoardDbInstaller extends ExternalResource {
87 87
88 88 @Override
89 89 protected void after() {
90   - File tbLogsDir = new File("./target/tb-logs/");
  90 + copyLogs(tbLogVolume, "./target/tb-logs/");
  91 +
  92 + dockerCompose.withCommand("volume rm -f " + postgresDataVolume + " " + tbLogVolume);
  93 + dockerCompose.invokeDocker();
  94 + }
  95 +
  96 + private void copyLogs(String volumeName, String targetDir) {
  97 + File tbLogsDir = new File(targetDir);
91 98 tbLogsDir.mkdirs();
92 99
93   - dockerCompose.withCommand("run -d --rm --name tb-logs-container -v " + tbLogVolume + ":/root alpine tail -f /dev/null");
  100 + dockerCompose.withCommand("run -d --rm --name tb-logs-container -v " + volumeName + ":/root alpine tail -f /dev/null");
94 101 dockerCompose.invokeDocker();
95 102
96 103 dockerCompose.withCommand("cp tb-logs-container:/root/. " + tbLogsDir.getAbsolutePath());
... ... @@ -98,9 +105,6 @@ public class ThingsBoardDbInstaller extends ExternalResource {
98 105
99 106 dockerCompose.withCommand("rm -f tb-logs-container");
100 107 dockerCompose.invokeDocker();
101   -
102   - dockerCompose.withCommand("volume rm -f " + postgresDataVolume + " " + tbLogVolume);
103   - dockerCompose.invokeDocker();
104 108 }
105 109
106 110 }
... ...
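Behavior-wise the teardown is unchanged: the log volume is still drained by mounting it into a throwaway alpine container (tail -f /dev/null keeps it alive), copying its contents out with docker cp, and removing the container, all before both named volumes are deleted. The change extracts that sequence into a copyLogs(volumeName, targetDir) helper, presumably so additional volumes can be archived the same way, and keeps the volume removal in after() so the helper has no teardown side effects of its own.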
... ... @@ -45,6 +45,24 @@ var kafkaClient;
45 45 kafkaRequestTopic
46 46 );
47 47
  48 + consumer.on('error', (err) => {
  49 + logger.error('Unexpected kafka consumer error: %s', err.message);
  50 + logger.error(err.stack);
  51 + });
  52 +
  53 + consumer.on('offsetOutOfRange', (err) => {
  54 + logger.error('Offset out of range error: %s', err.message);
  55 + logger.error(err.stack);
  56 + });
  57 +
  58 + consumer.on('rebalancing', () => {
  59 + logger.info('Rebalancing event received.');
  60 + });
  61 +
  62 + consumer.on('rebalanced', () => {
  63 + logger.info('Rebalanced event received.');
  64 + });
  65 +
48 66 var producer = new Producer(kafkaClient);
49 67 producer.on('error', (err) => {
50 68 logger.error('Unexpected kafka producer error: %s', err.message);
... ...
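These handlers are more than diagnostics. kafka-node consumers are EventEmitters, and in Node.js an 'error' event with no registered listener is thrown and brings down the process, so the new 'error' handler converts a fatal condition into a logged one, matching the producer handler that already existed below. 'offsetOutOfRange', 'rebalancing', and 'rebalanced' are likewise events emitted by kafka-node's consumer, and logging them makes partition reassignments visible in the JS executor's logs.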