Commit 5f40dcacdbd8c5524db2031a65d775bd3bf67de0
1 parent 62db6001
Fix docker env files. Improve logging.
Showing 6 changed files with 37 additions and 9 deletions.
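The six files fall into the two halves of the commit message: two Docker env files trade a KAFKA_TOPICS indirection for an inline KAFKA_CREATE_TOPICS value, while the remaining four improve logging — trace statements along the Java Kafka request/response path, error and rebalance handlers on the Node.js script executor's consumer — plus a small refactor of the integration-test log collection.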
@@ -136,6 +136,7 @@ public class RemoteJsInvokeService extends AbstractJsInvokeService {
                 .setCompileRequest(jsRequest)
                 .build();
 
+        log.trace("Post compile request for scriptId [{}]", scriptId);
         ListenableFuture<JsInvokeProtos.RemoteJsResponse> future = kafkaTemplate.post(scriptId.toString(), jsRequestWrapper);
         return Futures.transform(future, response -> {
             JsInvokeProtos.JsCompileResponse compilationResult = response.getCompileResponse();
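The new statement uses the parameterized SLF4J style, so it costs almost nothing when TRACE is disabled. A minimal sketch of the idiom (assuming an SLF4J logger, as ThingsBoard's Lombok @Slf4j fields provide; the class and method here are illustrative only, not part of the commit):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative class, not from the commit.
public class TraceLoggingSketch {
    private static final Logger log = LoggerFactory.getLogger(TraceLoggingSketch.class);

    void postCompileRequest(Object scriptId) {
        // The "{}" placeholder is formatted only if TRACE is enabled, so the
        // statement is effectively free under production log configurations.
        log.trace("Post compile request for scriptId [{}]", scriptId);
    }
}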
@@ -92,6 +92,9 @@ public class TbKafkaRequestTemplate<Request, Response> extends AbstractTbKafkaTe
         long nextCleanupMs = 0L;
         while (!stopped) {
             ConsumerRecords<String, byte[]> responses = responseTemplate.poll(Duration.ofMillis(pollInterval));
+            if (responses.count() > 0) {
+                log.trace("Polling responses completed, consumer records count [{}]", responses.count());
+            }
             responses.forEach(response -> {
                 Header requestIdHeader = response.headers().lastHeader(TbKafkaSettings.REQUEST_ID_HEADER);
                 Response decodedResponse = null;
@@ -109,6 +112,7 @@ public class TbKafkaRequestTemplate<Request, Response> extends AbstractTbKafkaTe
                     if (requestId == null) {
                         log.error("[{}] Missing requestId in header and body", response);
                     } else {
+                        log.trace("[{}] Response received", requestId);
                         ResponseMetaData<Response> expectedResponse = pendingRequests.remove(requestId);
                         if (expectedResponse == null) {
                             log.trace("[{}] Invalid or stale request", requestId);
@@ -132,6 +136,7 @@ public class TbKafkaRequestTemplate<Request, Response> extends AbstractTbKafkaTe
                 if (kv.getValue().expTime < tickTs) {
                     ResponseMetaData<Response> staleRequest = pendingRequests.remove(kv.getKey());
                     if (staleRequest != null) {
+                        log.trace("[{}] Request timeout detected, expTime [{}], tickTs [{}]", kv.getKey(), staleRequest.expTime, tickTs);
                         staleRequest.future.setException(new TimeoutException());
                     }
                 }
@@ -158,8 +163,10 @@ public class TbKafkaRequestTemplate<Request, Response> extends AbstractTbKafkaTe
         headers.add(new RecordHeader(TbKafkaSettings.REQUEST_ID_HEADER, uuidToBytes(requestId)));
         headers.add(new RecordHeader(TbKafkaSettings.RESPONSE_TOPIC_HEADER, stringToBytes(responseTemplate.getTopic())));
         SettableFuture<Response> future = SettableFuture.create();
-        pendingRequests.putIfAbsent(requestId, new ResponseMetaData<>(tickTs + maxRequestTimeout, future));
+        ResponseMetaData<Response> responseMetaData = new ResponseMetaData<>(tickTs + maxRequestTimeout, future);
+        pendingRequests.putIfAbsent(requestId, responseMetaData);
         request = requestTemplate.enrich(request, responseTemplate.getTopic(), requestId);
+        log.trace("[{}] Sending request, key [{}], expTime [{}]", requestId, key, responseMetaData.expTime);
         requestTemplate.send(key, request, headers, null);
         return future;
     }
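Taken together, the four hunks above instrument every stage of the request/response correlation that TbKafkaRequestTemplate implements: send, receive, and timeout sweep. As a reading aid, here is a minimal sketch of that pattern — the names mirror the diff, but the class is a simplified reconstruction, not the real TbKafkaRequestTemplate API:

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeoutException;

// Sketch of request/response correlation over a message bus: each outgoing
// request registers a SettableFuture keyed by a request UUID; the response
// consumer completes it, and a periodic sweep fails entries past expTime.
public class PendingRequestSketch<Response> {

    private static class ResponseMetaData<R> {
        final long expTime;             // absolute deadline in ms
        final SettableFuture<R> future; // completed by the response consumer

        ResponseMetaData(long expTime, SettableFuture<R> future) {
            this.expTime = expTime;
            this.future = future;
        }
    }

    private final ConcurrentMap<UUID, ResponseMetaData<Response>> pendingRequests = new ConcurrentHashMap<>();

    // Send side: register the future before the request is actually sent,
    // so a fast response can never arrive with no one waiting for it.
    public ListenableFuture<Response> register(UUID requestId, long nowMs, long maxRequestTimeoutMs) {
        SettableFuture<Response> future = SettableFuture.create();
        pendingRequests.putIfAbsent(requestId, new ResponseMetaData<>(nowMs + maxRequestTimeoutMs, future));
        return future;
    }

    // Response side: remove-then-complete, so each response settles at most one future.
    public void onResponse(UUID requestId, Response response) {
        ResponseMetaData<Response> expected = pendingRequests.remove(requestId);
        if (expected != null) {
            expected.future.set(response);
        }
    }

    // Cleanup sweep: fail anything whose deadline has passed.
    public void expireStale(long nowMs) {
        pendingRequests.forEach((id, md) -> {
            if (md.expTime < nowMs && pendingRequests.remove(id) != null) {
                md.future.setException(new TimeoutException());
            }
        });
    }
}

Extracting responseMetaData into a local variable in the diff serves the new trace line: it lets the send path log the exact expTime that the cleanup sweep will later compare against tickTs, so a "Sending request" line can be matched to a "Request timeout detected" line.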
@@ -16,5 +16,3 @@ TB_VERSION=latest
 DATABASE=postgres
 
 LOAD_BALANCER_NAME=haproxy-certbot
-
-KAFKA_TOPICS="js.eval.requests:100:1:delete --config=retention.ms=60000 --config=segment.bytes=26214400 --config=retention.bytes=104857600,tb.transport.api.requests:30:1:delete --config=retention.ms=60000 --config=segment.bytes=26214400 --config=retention.bytes=104857600,tb.rule-engine:30:1"
@@ -4,7 +4,7 @@ KAFKA_LISTENERS=INSIDE://:9093,OUTSIDE://:9092
 KAFKA_ADVERTISED_LISTENERS=INSIDE://:9093,OUTSIDE://kafka:9092
 KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
 KAFKA_INTER_BROKER_LISTENER_NAME=INSIDE
-KAFKA_CREATE_TOPICS=${KAFKA_TOPICS}
+KAFKA_CREATE_TOPICS=js.eval.requests:100:1:delete --config=retention.ms=60000 --config=segment.bytes=26214400 --config=retention.bytes=104857600,tb.transport.api.requests:30:1:delete --config=retention.ms=60000 --config=segment.bytes=26214400 --config=retention.bytes=104857600,tb.rule-engine:30:1
 KAFKA_AUTO_CREATE_TOPICS_ENABLE=false
 KAFKA_LOG_RETENTION_BYTES=1073741824
 KAFKA_LOG_SEGMENT_BYTES=268435456
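These two env hunks are the "fix docker env files" half of the commit. Files supplied to a container via docker-compose's env_file are passed verbatim, with no variable substitution, so the previous KAFKA_CREATE_TOPICS=${KAFKA_TOPICS} indirection would have delivered the literal string ${KAFKA_TOPICS} to the broker container; inlining the topic list removes the indirection (this reading of the bug is an inference from the diff, not stated in the commit). Assuming the wurstmeister/kafka convention for KAFKA_CREATE_TOPICS, each comma-separated entry is name:partitions:replicas[:cleanupPolicy] — so js.eval.requests gets 100 partitions, 1 replica, and the delete cleanup policy — and the trailing --config=... flags appear to ride along as per-topic overrides for retention and segment sizes.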
@@ -87,10 +87,17 @@ public class ThingsBoardDbInstaller extends ExternalResource {
 
     @Override
     protected void after() {
-        File tbLogsDir = new File("./target/tb-logs/");
+        copyLogs(tbLogVolume, "./target/tb-logs/");
+
+        dockerCompose.withCommand("volume rm -f " + postgresDataVolume + " " + tbLogVolume);
+        dockerCompose.invokeDocker();
+    }
+
+    private void copyLogs(String volumeName, String targetDir) {
+        File tbLogsDir = new File(targetDir);
         tbLogsDir.mkdirs();
 
-        dockerCompose.withCommand("run -d --rm --name tb-logs-container -v " + tbLogVolume + ":/root alpine tail -f /dev/null");
+        dockerCompose.withCommand("run -d --rm --name tb-logs-container -v " + volumeName + ":/root alpine tail -f /dev/null");
         dockerCompose.invokeDocker();
 
         dockerCompose.withCommand("cp tb-logs-container:/root/. "+tbLogsDir.getAbsolutePath());
@@ -98,9 +105,6 @@ public class ThingsBoardDbInstaller extends ExternalResource {
 
         dockerCompose.withCommand("rm -f tb-logs-container");
         dockerCompose.invokeDocker();
-
-        dockerCompose.withCommand("volume rm -f " + postgresDataVolume + " " + tbLogVolume);
-        dockerCompose.invokeDocker();
     }
 
 }
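Assembled, the two installer hunks yield the shape below (a reconstruction from the diff, not verbatim source). The refactoring extracts the throwaway-container copy trick into copyLogs(...), parameterized by volume name, and keeps the volume removal at the end of after(), so logs are always copied out before the volume that holds them is deleted:

@Override
protected void after() {
    // Copy logs out of the named volume *before* the volumes are removed.
    copyLogs(tbLogVolume, "./target/tb-logs/");

    dockerCompose.withCommand("volume rm -f " + postgresDataVolume + " " + tbLogVolume);
    dockerCompose.invokeDocker();
}

private void copyLogs(String volumeName, String targetDir) {
    File tbLogsDir = new File(targetDir);
    tbLogsDir.mkdirs();

    // Mount the named volume into a disposable container so `docker cp` can read it.
    dockerCompose.withCommand("run -d --rm --name tb-logs-container -v " + volumeName + ":/root alpine tail -f /dev/null");
    dockerCompose.invokeDocker();

    dockerCompose.withCommand("cp tb-logs-container:/root/. " + tbLogsDir.getAbsolutePath());
    dockerCompose.invokeDocker();

    dockerCompose.withCommand("rm -f tb-logs-container");
    dockerCompose.invokeDocker();
}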
@@ -45,6 +45,24 @@ var kafkaClient;
         kafkaRequestTopic
     );
 
+    consumer.on('error', (err) => {
+        logger.error('Unexpected kafka consumer error: %s', err.message);
+        logger.error(err.stack);
+    });
+
+    consumer.on('offsetOutOfRange', (err) => {
+        logger.error('Offset out of range error: %s', err.message);
+        logger.error(err.stack);
+    });
+
+    consumer.on('rebalancing', () => {
+        logger.info('Rebalancing event received.');
+    })
+
+    consumer.on('rebalanced', () => {
+        logger.info('Rebalanced event received.');
+    });
+
     var producer = new Producer(kafkaClient);
     producer.on('error', (err) => {
         logger.error('Unexpected kafka producer error: %s', err.message);
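The consumer-side handlers mirror the producer's existing 'error' handler. This matters in Node.js because an EventEmitter with no 'error' listener throws and can bring the whole process down, so attaching one turns broker hiccups into log lines instead of crashes. 'offsetOutOfRange' is likewise an event the kafka-node consumer emits, and 'rebalancing'/'rebalanced' come from its consumer-group implementation, logged here for visibility into consumer-group churn.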