001 /**
002 * Licensed to the Apache Software Foundation (ASF) under one or more
003 * contributor license agreements. See the NOTICE file distributed with
004 * this work for additional information regarding copyright ownership.
005 * The ASF licenses this file to You under the Apache License, Version 2.0
006 * (the "License"); you may not use this file except in compliance with
007 * the License. You may obtain a copy of the License at
008 *
009 * http://www.apache.org/licenses/LICENSE-2.0
010 *
011 * Unless required by applicable law or agreed to in writing, software
012 * distributed under the License is distributed on an "AS IS" BASIS,
013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
014 * See the License for the specific language governing permissions and
015 * limitations under the License.
016 */
017 package org.apache.camel.processor.aggregate;
018
019 import java.util.ArrayList;
020 import java.util.HashMap;
021 import java.util.HashSet;
022 import java.util.LinkedHashSet;
023 import java.util.List;
024 import java.util.Map;
025 import java.util.Set;
026 import java.util.concurrent.ConcurrentHashMap;
027 import java.util.concurrent.ExecutorService;
028 import java.util.concurrent.ScheduledExecutorService;
029 import java.util.concurrent.TimeUnit;
030 import java.util.concurrent.atomic.AtomicInteger;
031 import java.util.concurrent.locks.Lock;
032 import java.util.concurrent.locks.ReentrantLock;
033
034 import org.apache.camel.CamelContext;
035 import org.apache.camel.CamelExchangeException;
036 import org.apache.camel.Endpoint;
037 import org.apache.camel.Exchange;
038 import org.apache.camel.Expression;
039 import org.apache.camel.Navigate;
040 import org.apache.camel.NoSuchEndpointException;
041 import org.apache.camel.Predicate;
042 import org.apache.camel.Processor;
043 import org.apache.camel.ProducerTemplate;
044 import org.apache.camel.TimeoutMap;
045 import org.apache.camel.Traceable;
046 import org.apache.camel.impl.LoggingExceptionHandler;
047 import org.apache.camel.spi.AggregationRepository;
048 import org.apache.camel.spi.ExceptionHandler;
049 import org.apache.camel.spi.RecoverableAggregationRepository;
050 import org.apache.camel.spi.ShutdownPrepared;
051 import org.apache.camel.spi.Synchronization;
052 import org.apache.camel.support.DefaultTimeoutMap;
053 import org.apache.camel.support.ServiceSupport;
054 import org.apache.camel.util.ExchangeHelper;
055 import org.apache.camel.util.LRUCache;
056 import org.apache.camel.util.ObjectHelper;
057 import org.apache.camel.util.ServiceHelper;
058 import org.apache.camel.util.StopWatch;
059 import org.apache.camel.util.TimeUtils;
060 import org.slf4j.Logger;
061 import org.slf4j.LoggerFactory;
062
063 /**
064 * An implementation of the <a
065 * href="http://camel.apache.org/aggregator2.html">Aggregator</a>
066 * pattern where a batch of messages are processed (up to a maximum amount or
067 * until some timeout is reached) and messages for the same correlation key are
068 * combined together using some kind of {@link AggregationStrategy}
069 * (by default the latest message is used) to compress many message exchanges
070 * into a smaller number of exchanges.
071 * <p/>
072 * A good example of this is stock market data; you may be receiving 30,000
073 * messages/second and you may want to throttle it right down so that multiple
074 * messages for the same stock are combined (or just the latest message is used
075 * and older prices are discarded). Another idea is to combine line item messages
076 * together into a single invoice message.
077 */
078 public class AggregateProcessor extends ServiceSupport implements Processor, Navigate<Processor>, Traceable, ShutdownPrepared {
079
080 public static final String AGGREGATE_TIMEOUT_CHECKER = "AggregateTimeoutChecker";
081
082 private static final Logger LOG = LoggerFactory.getLogger(AggregateProcessor.class);
083
// shared lock guarding aggregation so the same correlation key is never aggregated in parallel;
// also acquired by the timeout purge and interval tasks
private final Lock lock = new ReentrantLock();
private final CamelContext camelContext;
// the processor that receives the completed (aggregated) exchange
private final Processor processor;
private final AggregationStrategy aggregationStrategy;
private final Expression correlationExpression;
// executor used to send out completed exchanges asynchronously
private final ExecutorService executorService;
private final boolean shutdownExecutorService;
private ScheduledExecutorService timeoutCheckerExecutorService;
private boolean shutdownTimeoutCheckerExecutorService;
private ScheduledExecutorService recoverService;
// store correlation key -> exchange id in timeout map
private TimeoutMap<String, String> timeoutMap;
private ExceptionHandler exceptionHandler = new LoggingExceptionHandler(getClass());
// defaults to in-memory repository; may be replaced via setter (e.g. with a persistent one)
private AggregationRepository aggregationRepository = new MemoryAggregationRepository();
// correlation keys that have completed and are closed for further aggregation
// (only used when closeCorrelationKeyOnCompletion is configured)
private Map<Object, Object> closedCorrelationKeys;
private Set<String> batchConsumerCorrelationKeys = new LinkedHashSet<String>();
// exchange ids that have completed and are currently being sent out (not yet confirmed)
private final Set<String> inProgressCompleteExchanges = new HashSet<String>();
private final Map<String, RedeliveryData> redeliveryState = new ConcurrentHashMap<String, RedeliveryData>();

// keeps bookkeeping of redelivery attempts per recovered exchange id
private class RedeliveryData {
int redeliveryCounter;
}

// options
private boolean ignoreInvalidCorrelationKeys;
private Integer closeCorrelationKeyOnCompletion;
private boolean parallelProcessing;

// different ways to have completion triggered
private boolean eagerCheckCompletion;
private Predicate completionPredicate;
private long completionTimeout;
private Expression completionTimeoutExpression;
private long completionInterval;
private int completionSize;
private Expression completionSizeExpression;
private boolean completionFromBatchConsumer;
private AtomicInteger batchConsumerCounter = new AtomicInteger();
private boolean discardOnCompletionTimeout;
private boolean forceCompletionOnStop;

private ProducerTemplate deadLetterProducerTemplate;
127
/**
 * Creates the aggregate processor.
 *
 * @param camelContext            the camel context (mandatory)
 * @param processor               the processor to send the aggregated exchange to (mandatory)
 * @param correlationExpression   expression used to compute the correlation key (mandatory)
 * @param aggregationStrategy     strategy used to merge exchanges (mandatory)
 * @param executorService         executor used to process completed exchanges (mandatory)
 * @param shutdownExecutorService whether this processor owns (and must shut down) the executor
 */
public AggregateProcessor(CamelContext camelContext, Processor processor,
Expression correlationExpression, AggregationStrategy aggregationStrategy,
ExecutorService executorService, boolean shutdownExecutorService) {
// fail fast on any missing mandatory dependency
ObjectHelper.notNull(camelContext, "camelContext");
ObjectHelper.notNull(processor, "processor");
ObjectHelper.notNull(correlationExpression, "correlationExpression");
ObjectHelper.notNull(aggregationStrategy, "aggregationStrategy");
ObjectHelper.notNull(executorService, "executorService");
this.camelContext = camelContext;
this.processor = processor;
this.correlationExpression = correlationExpression;
this.aggregationStrategy = aggregationStrategy;
this.executorService = executorService;
this.shutdownExecutorService = shutdownExecutorService;
}
143
144 @Override
145 public String toString() {
146 return "AggregateProcessor[to: " + processor + "]";
147 }
148
149 public String getTraceLabel() {
150 return "aggregate[" + correlationExpression + "]";
151 }
152
153 public List<Processor> next() {
154 if (!hasNext()) {
155 return null;
156 }
157 List<Processor> answer = new ArrayList<Processor>(1);
158 answer.add(processor);
159 return answer;
160 }
161
162 public boolean hasNext() {
163 return processor != null;
164 }
165
/**
 * Processes the incoming exchange: evaluates the correlation key, validates it,
 * and aggregates a correlated copy of the exchange under the shared lock.
 *
 * @param exchange the incoming exchange
 * @throws CamelExchangeException if the correlation key is invalid (and not ignored)
 * @throws ClosedCorrelationKeyException if the correlation key has been closed
 */
public void process(Exchange exchange) throws Exception {

//check for the special header to force completion of all groups (and ignore the exchange otherwise)
boolean completeAllGroups = exchange.getIn().getHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS, false, boolean.class);
if (completeAllGroups) {
forceCompletionOfAllGroups();
return;
}

// compute correlation expression
String key = correlationExpression.evaluate(exchange, String.class);
if (ObjectHelper.isEmpty(key)) {
// we have a bad correlation key
if (isIgnoreInvalidCorrelationKeys()) {
LOG.debug("Invalid correlation key. This Exchange will be ignored: {}", exchange);
return;
} else {
throw new CamelExchangeException("Invalid correlation key", exchange);
}
}

// is the correlation key closed?
if (closedCorrelationKeys != null && closedCorrelationKeys.containsKey(key)) {
throw new ClosedCorrelationKeyException(key, exchange);
}

// copy exchange, and do not share the unit of work
// the aggregated output runs in another unit of work
Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);

// when memory based then its fast using the lock, but if the aggregation repository is IO
// bound such as JPA etc then concurrent aggregation per correlation key could
// improve performance as we can run aggregation repository get/add in parallel
lock.lock();
try {
doAggregation(key, copy);
} finally {
lock.unlock();
}
}
206
/**
 * Aggregates the exchange with the given correlation key
 * <p/>
 * This method <b>must</b> be run while holding the shared lock as we cannot aggregate the same
 * correlation key in parallel.
 *
 * @param key the correlation key
 * @param exchange the exchange
 * @return the aggregated exchange
 * @throws org.apache.camel.CamelExchangeException is thrown if error aggregating
 */
private Exchange doAggregation(String key, Exchange exchange) throws CamelExchangeException {
LOG.trace("onAggregation +++ start +++ with correlation key: {}", key);

Exchange answer;
Exchange oldExchange = aggregationRepository.get(exchange.getContext(), key);
Exchange newExchange = exchange;

// compute the aggregated size: 1 for a new group, previous size + 1 otherwise
Integer size = 1;
if (oldExchange != null) {
size = oldExchange.getProperty(Exchange.AGGREGATED_SIZE, 0, Integer.class);
size++;
}

// check if we are complete
String complete = null;
if (isEagerCheckCompletion()) {
// put the current aggregated size on the exchange so its avail during completion check
newExchange.setProperty(Exchange.AGGREGATED_SIZE, size);
complete = isCompleted(key, newExchange);
// remove it afterwards
newExchange.removeProperty(Exchange.AGGREGATED_SIZE);
}

// prepare the exchanges for aggregation and aggregate it
ExchangeHelper.prepareAggregation(oldExchange, newExchange);
// must catch any exception from aggregation
try {
answer = onAggregation(oldExchange, exchange);
} catch (Throwable e) {
throw new CamelExchangeException("Error occurred during aggregation", exchange, e);
}
if (answer == null) {
throw new CamelExchangeException("AggregationStrategy " + aggregationStrategy + " returned null which is not allowed", exchange);
}

// update the aggregated size
answer.setProperty(Exchange.AGGREGATED_SIZE, size);

// maybe we should check completion after the aggregation
if (!isEagerCheckCompletion()) {
complete = isCompleted(key, answer);
}

// only need to update aggregation repository if we are not complete
if (complete == null) {
LOG.trace("In progress aggregated exchange: {} with correlation key: {}", answer, key);
aggregationRepository.add(exchange.getContext(), key, answer);
} else {
// if batch consumer completion is enabled then we need to complete the group
if ("consumer".equals(complete)) {
// complete every group seen from the batch consumer, not just the current key
for (String batchKey : batchConsumerCorrelationKeys) {
Exchange batchAnswer;
if (batchKey.equals(key)) {
// skip the current aggregated key as we have already aggregated it and have the answer
batchAnswer = answer;
} else {
batchAnswer = aggregationRepository.get(camelContext, batchKey);
}

if (batchAnswer != null) {
batchAnswer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, complete);
onCompletion(batchKey, batchAnswer, false);
}
}
batchConsumerCorrelationKeys.clear();
} else {
// we are complete for this exchange
answer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, complete);
onCompletion(key, answer, false);
}
}

LOG.trace("onAggregation +++ end +++ with correlation key: {}", key);

return answer;
}
294
/**
 * Tests whether the given exchange is complete or not.
 * <p/>
 * Triggers are checked in order: predicate, size expression, fixed size, timeout
 * (expression takes precedence over the fixed value), then batch consumer.
 *
 * @param key the correlation key
 * @param exchange the incoming exchange
 * @return <tt>null</tt> if not completed, otherwise a String with the type that triggered the completion
 */
protected String isCompleted(String key, Exchange exchange) {
if (getCompletionPredicate() != null) {
boolean answer = getCompletionPredicate().matches(exchange);
if (answer) {
return "predicate";
}
}

if (getCompletionSizeExpression() != null) {
Integer value = getCompletionSizeExpression().evaluate(exchange, Integer.class);
if (value != null && value > 0) {
int size = exchange.getProperty(Exchange.AGGREGATED_SIZE, 1, Integer.class);
if (size >= value) {
return "size";
}
}
}
if (getCompletionSize() > 0) {
int size = exchange.getProperty(Exchange.AGGREGATED_SIZE, 1, Integer.class);
if (size >= getCompletionSize()) {
return "size";
}
}

// timeout can be either evaluated based on an expression or from a fixed value
// expression takes precedence
boolean timeoutSet = false;
if (getCompletionTimeoutExpression() != null) {
Long value = getCompletionTimeoutExpression().evaluate(exchange, Long.class);
if (value != null && value > 0) {
if (LOG.isTraceEnabled()) {
LOG.trace("Updating correlation key {} to timeout after {} ms. as exchange received: {}",
new Object[]{key, value, exchange});
}
addExchangeToTimeoutMap(key, exchange, value);
timeoutSet = true;
}
}
if (!timeoutSet && getCompletionTimeout() > 0) {
// timeout is used so use the timeout map to keep an eye on this
if (LOG.isTraceEnabled()) {
LOG.trace("Updating correlation key {} to timeout after {} ms. as exchange received: {}",
new Object[]{key, getCompletionTimeout(), exchange});
}
addExchangeToTimeoutMap(key, exchange, getCompletionTimeout());
}

if (isCompletionFromBatchConsumer()) {
batchConsumerCorrelationKeys.add(key);
batchConsumerCounter.incrementAndGet();
int size = exchange.getProperty(Exchange.BATCH_SIZE, 0, Integer.class);
if (size > 0 && batchConsumerCounter.intValue() >= size) {
// batch consumer is complete then reset the counter
batchConsumerCounter.set(0);
return "consumer";
}
}

// not complete
return null;
}
363
364 protected Exchange onAggregation(Exchange oldExchange, Exchange newExchange) {
365 return aggregationStrategy.aggregate(oldExchange, newExchange);
366 }
367
368 protected void onCompletion(final String key, final Exchange exchange, boolean fromTimeout) {
369 // store the correlation key as property
370 exchange.setProperty(Exchange.AGGREGATED_CORRELATION_KEY, key);
371 // remove from repository as its completed
372 aggregationRepository.remove(exchange.getContext(), key, exchange);
373 if (!fromTimeout && timeoutMap != null) {
374 // cleanup timeout map if it was a incoming exchange which triggered the timeout (and not the timeout checker)
375 timeoutMap.remove(key);
376 }
377
378 // this key has been closed so add it to the closed map
379 if (closedCorrelationKeys != null) {
380 closedCorrelationKeys.put(key, key);
381 }
382
383 if (fromTimeout) {
384 // invoke timeout if its timeout aware aggregation strategy,
385 // to allow any custom processing before discarding the exchange
386 if (aggregationStrategy instanceof TimeoutAwareAggregationStrategy) {
387 long timeout = getCompletionTimeout() > 0 ? getCompletionTimeout() : -1;
388 ((TimeoutAwareAggregationStrategy) aggregationStrategy).timeout(exchange, -1, -1, timeout);
389 }
390 }
391
392 if (fromTimeout && isDiscardOnCompletionTimeout()) {
393 // discard due timeout
394 LOG.debug("Aggregation for correlation key {} discarding aggregated exchange: ()", key, exchange);
395 // must confirm the discarded exchange
396 aggregationRepository.confirm(exchange.getContext(), exchange.getExchangeId());
397 // and remove redelivery state as well
398 redeliveryState.remove(exchange.getExchangeId());
399 } else {
400 // the aggregated exchange should be published (sent out)
401 onSubmitCompletion(key, exchange);
402 }
403 }
404
/**
 * Submits the completed aggregated exchange to the executor so it is processed
 * (sent to the next processor) asynchronously.
 * <p/>
 * The exchange id is registered as in-progress <b>before</b> the task is submitted,
 * so the recover and timeout checkers do not pick it up concurrently.
 *
 * @param key      the correlation key
 * @param exchange the completed aggregated exchange
 */
private void onSubmitCompletion(final Object key, final Exchange exchange) {
LOG.debug("Aggregation complete for correlation key {} sending aggregated exchange: {}", key, exchange);

// add this as in progress before we submit the task
inProgressCompleteExchanges.add(exchange.getExchangeId());

// invoke the on completion callback
if (aggregationStrategy instanceof CompletionAwareAggregationStrategy) {
((CompletionAwareAggregationStrategy) aggregationStrategy).onCompletion(exchange);
}

// send this exchange
executorService.submit(new Runnable() {
public void run() {
LOG.debug("Processing aggregated exchange: {}", exchange);

// add on completion task so we remember to update the inProgressCompleteExchanges
exchange.addOnCompletion(new AggregateOnCompletion(exchange.getExchangeId()));

try {
processor.process(exchange);
} catch (Throwable e) {
exchange.setException(e);
}

// log exception if there was a problem
if (exchange.getException() != null) {
// if there was an exception then let the exception handler handle it
getExceptionHandler().handleException("Error processing aggregated exchange", exchange, exchange.getException());
} else {
LOG.trace("Processing aggregated exchange: {} complete.", exchange);
}
}
});
}
440
441 /**
442 * Restores the timeout map with timeout values from the aggregation repository.
443 * <p/>
444 * This is needed in case the aggregator has been stopped and started again (for example a server restart).
445 * Then the existing exchanges from the {@link AggregationRepository} must have its timeout conditions restored.
446 */
447 protected void restoreTimeoutMapFromAggregationRepository() throws Exception {
448 // grab the timeout value for each partly aggregated exchange
449 Set<String> keys = aggregationRepository.getKeys();
450 if (keys == null || keys.isEmpty()) {
451 return;
452 }
453
454 StopWatch watch = new StopWatch();
455 LOG.trace("Starting restoring CompletionTimeout for {} existing exchanges from the aggregation repository...", keys.size());
456
457 for (String key : keys) {
458 Exchange exchange = aggregationRepository.get(camelContext, key);
459 // grab the timeout value
460 long timeout = exchange.hasProperties() ? exchange.getProperty(Exchange.AGGREGATED_TIMEOUT, 0, long.class) : 0;
461 if (timeout > 0) {
462 LOG.trace("Restoring CompletionTimeout for exchangeId: {} with timeout: {} millis.", exchange.getExchangeId(), timeout);
463 addExchangeToTimeoutMap(key, exchange, timeout);
464 }
465 }
466
467 // log duration of this task so end user can see how long it takes to pre-check this upon starting
468 LOG.info("Restored {} CompletionTimeout conditions in the AggregationTimeoutChecker in {}",
469 timeoutMap.size(), TimeUtils.printDuration(watch.stop()));
470 }
471
472 /**
473 * Adds the given exchange to the timeout map, which is used by the timeout checker task to trigger timeouts.
474 *
475 * @param key the correlation key
476 * @param exchange the exchange
477 * @param timeout the timeout value in millis
478 */
479 private void addExchangeToTimeoutMap(String key, Exchange exchange, long timeout) {
480 // store the timeout value on the exchange as well, in case we need it later
481 exchange.setProperty(Exchange.AGGREGATED_TIMEOUT, timeout);
482 timeoutMap.put(key, exchange.getExchangeId(), timeout);
483 }
484
// -------------------------------------------------------------------------
// Getters and setters for the completion triggers and configuration options
// -------------------------------------------------------------------------

public Predicate getCompletionPredicate() {
return completionPredicate;
}

public void setCompletionPredicate(Predicate completionPredicate) {
this.completionPredicate = completionPredicate;
}

public boolean isEagerCheckCompletion() {
return eagerCheckCompletion;
}

public void setEagerCheckCompletion(boolean eagerCheckCompletion) {
this.eagerCheckCompletion = eagerCheckCompletion;
}

public long getCompletionTimeout() {
return completionTimeout;
}

public void setCompletionTimeout(long completionTimeout) {
this.completionTimeout = completionTimeout;
}

public Expression getCompletionTimeoutExpression() {
return completionTimeoutExpression;
}

public void setCompletionTimeoutExpression(Expression completionTimeoutExpression) {
this.completionTimeoutExpression = completionTimeoutExpression;
}

public long getCompletionInterval() {
return completionInterval;
}

public void setCompletionInterval(long completionInterval) {
this.completionInterval = completionInterval;
}

public int getCompletionSize() {
return completionSize;
}

public void setCompletionSize(int completionSize) {
this.completionSize = completionSize;
}

public Expression getCompletionSizeExpression() {
return completionSizeExpression;
}

public void setCompletionSizeExpression(Expression completionSizeExpression) {
this.completionSizeExpression = completionSizeExpression;
}

public boolean isIgnoreInvalidCorrelationKeys() {
return ignoreInvalidCorrelationKeys;
}

public void setIgnoreInvalidCorrelationKeys(boolean ignoreInvalidCorrelationKeys) {
this.ignoreInvalidCorrelationKeys = ignoreInvalidCorrelationKeys;
}

public Integer getCloseCorrelationKeyOnCompletion() {
return closeCorrelationKeyOnCompletion;
}

public void setCloseCorrelationKeyOnCompletion(Integer closeCorrelationKeyOnCompletion) {
this.closeCorrelationKeyOnCompletion = closeCorrelationKeyOnCompletion;
}

public boolean isCompletionFromBatchConsumer() {
return completionFromBatchConsumer;
}

public void setCompletionFromBatchConsumer(boolean completionFromBatchConsumer) {
this.completionFromBatchConsumer = completionFromBatchConsumer;
}

public ExceptionHandler getExceptionHandler() {
return exceptionHandler;
}

public void setExceptionHandler(ExceptionHandler exceptionHandler) {
this.exceptionHandler = exceptionHandler;
}

public boolean isParallelProcessing() {
return parallelProcessing;
}

public void setParallelProcessing(boolean parallelProcessing) {
this.parallelProcessing = parallelProcessing;
}

public AggregationRepository getAggregationRepository() {
return aggregationRepository;
}

public void setAggregationRepository(AggregationRepository aggregationRepository) {
this.aggregationRepository = aggregationRepository;
}

public boolean isDiscardOnCompletionTimeout() {
return discardOnCompletionTimeout;
}

public void setDiscardOnCompletionTimeout(boolean discardOnCompletionTimeout) {
this.discardOnCompletionTimeout = discardOnCompletionTimeout;
}

public void setForceCompletionOnStop(boolean forceCompletionOnStop) {
this.forceCompletionOnStop = forceCompletionOnStop;
}

public void setTimeoutCheckerExecutorService(ScheduledExecutorService timeoutCheckerExecutorService) {
this.timeoutCheckerExecutorService = timeoutCheckerExecutorService;
}

public ScheduledExecutorService getTimeoutCheckerExecutorService() {
return timeoutCheckerExecutorService;
}

public boolean isShutdownTimeoutCheckerExecutorService() {
return shutdownTimeoutCheckerExecutorService;
}

public void setShutdownTimeoutCheckerExecutorService(boolean shutdownTimeoutCheckerExecutorService) {
this.shutdownTimeoutCheckerExecutorService = shutdownTimeoutCheckerExecutorService;
}
616
/**
 * On completion task which keeps the bookkeeping of in-progress exchanges up to date.
 * <p/>
 * Registered on each aggregated exchange before it is sent out; confirms the exchange
 * in the repository on success and de-registers it from the in-progress set either way.
 */
private final class AggregateOnCompletion implements Synchronization {
private final String exchangeId;

private AggregateOnCompletion(String exchangeId) {
// must use the original exchange id as it could potentially change if send over SEDA etc.
this.exchangeId = exchangeId;
}

public void onFailure(Exchange exchange) {
LOG.trace("Aggregated exchange onFailure: {}", exchange);

// must remember to remove in progress when we failed
inProgressCompleteExchanges.remove(exchangeId);
// do not remove redelivery state as we need it when we redeliver again later
}

public void onComplete(Exchange exchange) {
LOG.trace("Aggregated exchange onComplete: {}", exchange);

// only confirm if we processed without a problem
try {
aggregationRepository.confirm(exchange.getContext(), exchangeId);
// and remove redelivery state as well
redeliveryState.remove(exchangeId);
} finally {
// must remember to remove in progress when we are complete
inProgressCompleteExchanges.remove(exchangeId);
}
}

@Override
public String toString() {
return "AggregateOnCompletion";
}
}
655
656 /**
657 * Background task that looks for aggregated exchanges which is triggered by completion timeouts.
658 */
659 private final class AggregationTimeoutMap extends DefaultTimeoutMap<String, String> {
660
661 private AggregationTimeoutMap(ScheduledExecutorService executor, long requestMapPollTimeMillis) {
662 // do NOT use locking on the timeout map as this aggregator has its own shared lock we will use instead
663 super(executor, requestMapPollTimeMillis, false);
664 }
665
666 @Override
667 public void purge() {
668 // must acquire the shared aggregation lock to be able to purge
669 lock.lock();
670 try {
671 super.purge();
672 } finally {
673 lock.unlock();
674 }
675 }
676
677 @Override
678 public boolean onEviction(String key, String exchangeId) {
679 log.debug("Completion timeout triggered for correlation key: {}", key);
680
681 boolean inProgress = inProgressCompleteExchanges.contains(exchangeId);
682 if (inProgress) {
683 LOG.trace("Aggregated exchange with id: {} is already in progress.", exchangeId);
684 return true;
685 }
686
687 // get the aggregated exchange
688 Exchange answer = aggregationRepository.get(camelContext, key);
689 if (answer != null) {
690 // indicate it was completed by timeout
691 answer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "timeout");
692 onCompletion(key, answer, true);
693 }
694 return true;
695 }
696 }
697
/**
 * Background task that triggers completion based on interval: every run it completes
 * <b>all</b> groups currently stored in the aggregation repository.
 */
private final class AggregationIntervalTask implements Runnable {

public void run() {
// only run if CamelContext has been fully started
if (!camelContext.getStatus().isStarted()) {
LOG.trace("Completion interval task cannot start due CamelContext({}) has not been started yet", camelContext.getName());
return;
}

LOG.trace("Starting completion interval task");

// trigger completion for all in the repository
Set<String> keys = aggregationRepository.getKeys();

if (keys != null && !keys.isEmpty()) {
// must acquire the shared aggregation lock to be able to trigger interval completion
lock.lock();
try {
for (String key : keys) {
Exchange exchange = aggregationRepository.get(camelContext, key);
if (exchange != null) {
LOG.trace("Completion interval triggered for correlation key: {}", key);
// indicate it was completed by interval
exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "interval");
onCompletion(key, exchange, false);
}
}
} finally {
lock.unlock();
}
}

LOG.trace("Completion interval task complete");
}
}
736
/**
 * Background task that looks for aggregated exchanges to recover.
 * <p/>
 * Scans the {@link RecoverableAggregationRepository} for unconfirmed exchange ids and
 * resubmits them, tracking a per-exchange redelivery counter; when the maximum number
 * of redeliveries is exhausted the exchange is moved to the configured dead letter uri.
 */
private final class RecoverTask implements Runnable {
private final RecoverableAggregationRepository recoverable;

private RecoverTask(RecoverableAggregationRepository recoverable) {
this.recoverable = recoverable;
}

public void run() {
// only run if CamelContext has been fully started
if (!camelContext.getStatus().isStarted()) {
LOG.trace("Recover check cannot start due CamelContext({}) has not been started yet", camelContext.getName());
return;
}

LOG.trace("Starting recover check");

Set<String> exchangeIds = recoverable.scan(camelContext);
for (String exchangeId : exchangeIds) {

// we may shutdown while doing recovery
if (!isRunAllowed()) {
LOG.info("We are shutting down so stop recovering");
return;
}

// skip exchanges that are currently being sent out, to avoid double processing
boolean inProgress = inProgressCompleteExchanges.contains(exchangeId);
if (inProgress) {
LOG.trace("Aggregated exchange with id: {} is already in progress.", exchangeId);
} else {
LOG.debug("Loading aggregated exchange with id: {} to be recovered.", exchangeId);
Exchange exchange = recoverable.recover(camelContext, exchangeId);
if (exchange != null) {
// get the correlation key
String key = exchange.getProperty(Exchange.AGGREGATED_CORRELATION_KEY, String.class);
// and mark it as redelivered
exchange.getIn().setHeader(Exchange.REDELIVERED, Boolean.TRUE);

// get the current redelivery data
RedeliveryData data = redeliveryState.get(exchange.getExchangeId());

// if we are exhausted, then move to dead letter channel
if (data != null && recoverable.getMaximumRedeliveries() > 0 && data.redeliveryCounter >= recoverable.getMaximumRedeliveries()) {
LOG.warn("The recovered exchange is exhausted after " + recoverable.getMaximumRedeliveries()
+ " attempts, will now be moved to dead letter channel: " + recoverable.getDeadLetterUri());

// send to DLC
try {
// set redelivery counter
exchange.getIn().setHeader(Exchange.REDELIVERY_COUNTER, data.redeliveryCounter);
exchange.getIn().setHeader(Exchange.REDELIVERY_EXHAUSTED, Boolean.TRUE);
deadLetterProducerTemplate.send(recoverable.getDeadLetterUri(), exchange);
} catch (Throwable e) {
exchange.setException(e);
}

// handle if failed
if (exchange.getException() != null) {
getExceptionHandler().handleException("Failed to move recovered Exchange to dead letter channel: " + recoverable.getDeadLetterUri(), exchange.getException());
} else {
// it was ok, so confirm after it has been moved to dead letter channel, so we wont recover it again
recoverable.confirm(camelContext, exchangeId);
}
} else {
// update current redelivery state
if (data == null) {
// create new data
data = new RedeliveryData();
redeliveryState.put(exchange.getExchangeId(), data);
}
data.redeliveryCounter++;

// set redelivery counter
exchange.getIn().setHeader(Exchange.REDELIVERY_COUNTER, data.redeliveryCounter);
if (recoverable.getMaximumRedeliveries() > 0) {
exchange.getIn().setHeader(Exchange.REDELIVERY_MAX_COUNTER, recoverable.getMaximumRedeliveries());
}

LOG.debug("Delivery attempt: {} to recover aggregated exchange with id: {}", data.redeliveryCounter, exchangeId);

// not exhausted so resubmit the recovered exchange
onSubmitCompletion(key, exchange);
}
}
}
}

LOG.trace("Recover check complete");
}
}
829
830 @Override
831 protected void doStart() throws Exception {
832 if (getCompletionTimeout() <= 0 && getCompletionInterval() <= 0 && getCompletionSize() <= 0 && getCompletionPredicate() == null
833 && !isCompletionFromBatchConsumer() && getCompletionTimeoutExpression() == null
834 && getCompletionSizeExpression() == null) {
835 throw new IllegalStateException("At least one of the completions options"
836 + " [completionTimeout, completionInterval, completionSize, completionPredicate, completionFromBatchConsumer] must be set");
837 }
838
839 if (getCloseCorrelationKeyOnCompletion() != null) {
840 if (getCloseCorrelationKeyOnCompletion() > 0) {
841 LOG.info("Using ClosedCorrelationKeys with a LRUCache with a capacity of " + getCloseCorrelationKeyOnCompletion());
842 closedCorrelationKeys = new LRUCache<Object, Object>(getCloseCorrelationKeyOnCompletion());
843 } else {
844 LOG.info("Using ClosedCorrelationKeys with unbounded capacity");
845 closedCorrelationKeys = new HashMap<Object, Object>();
846 }
847 }
848
849 ServiceHelper.startServices(processor, aggregationRepository);
850
851 // should we use recover checker
852 if (aggregationRepository instanceof RecoverableAggregationRepository) {
853 RecoverableAggregationRepository recoverable = (RecoverableAggregationRepository) aggregationRepository;
854 if (recoverable.isUseRecovery()) {
855 long interval = recoverable.getRecoveryIntervalInMillis();
856 if (interval <= 0) {
857 throw new IllegalArgumentException("AggregationRepository has recovery enabled and the RecoveryInterval option must be a positive number, was: " + interval);
858 }
859
860 // create a background recover thread to check every interval
861 recoverService = camelContext.getExecutorServiceManager().newScheduledThreadPool(this, "AggregateRecoverChecker", 1);
862 Runnable recoverTask = new RecoverTask(recoverable);
863 LOG.info("Using RecoverableAggregationRepository by scheduling recover checker to run every " + interval + " millis.");
864 // use fixed delay so there is X interval between each run
865 recoverService.scheduleWithFixedDelay(recoverTask, 1000L, interval, TimeUnit.MILLISECONDS);
866
867 if (recoverable.getDeadLetterUri() != null) {
868 int max = recoverable.getMaximumRedeliveries();
869 if (max <= 0) {
870 throw new IllegalArgumentException("Option maximumRedeliveries must be a positive number, was: " + max);
871 }
872 LOG.info("After " + max + " failed redelivery attempts Exchanges will be moved to deadLetterUri: " + recoverable.getDeadLetterUri());
873
874 // dead letter uri must be a valid endpoint
875 Endpoint endpoint = camelContext.getEndpoint(recoverable.getDeadLetterUri());
876 if (endpoint == null) {
877 throw new NoSuchEndpointException(recoverable.getDeadLetterUri());
878 }
879 deadLetterProducerTemplate = camelContext.createProducerTemplate();
880 }
881 }
882 }
883
884 if (getCompletionInterval() > 0 && getCompletionTimeout() > 0) {
885 throw new IllegalArgumentException("Only one of completionInterval or completionTimeout can be used, not both.");
886 }
887 if (getCompletionInterval() > 0) {
888 LOG.info("Using CompletionInterval to run every " + getCompletionInterval() + " millis.");
889 if (getTimeoutCheckerExecutorService() == null) {
890 setTimeoutCheckerExecutorService(camelContext.getExecutorServiceManager().newScheduledThreadPool(this, AGGREGATE_TIMEOUT_CHECKER, 1));
891 shutdownTimeoutCheckerExecutorService = true;
892 }
893 // trigger completion based on interval
894 getTimeoutCheckerExecutorService().scheduleAtFixedRate(new AggregationIntervalTask(), getCompletionInterval(), getCompletionInterval(), TimeUnit.MILLISECONDS);
895 }
896
897 // start timeout service if its in use
898 if (getCompletionTimeout() > 0 || getCompletionTimeoutExpression() != null) {
899 LOG.info("Using CompletionTimeout to trigger after " + getCompletionTimeout() + " millis of inactivity.");
900 if (getTimeoutCheckerExecutorService() == null) {
901 setTimeoutCheckerExecutorService(camelContext.getExecutorServiceManager().newScheduledThreadPool(this, AGGREGATE_TIMEOUT_CHECKER, 1));
902 shutdownTimeoutCheckerExecutorService = true;
903 }
904 // check for timed out aggregated messages once every second
905 timeoutMap = new AggregationTimeoutMap(getTimeoutCheckerExecutorService(), 1000L);
906 // fill in existing timeout values from the aggregation repository, for example if a restart occurred, then we
907 // need to re-establish the timeout map so timeout can trigger
908 restoreTimeoutMapFromAggregationRepository();
909 ServiceHelper.startService(timeoutMap);
910 }
911 }
912
913 @Override
914 protected void doStop() throws Exception {
915 // note: we cannot do doForceCompletionOnStop from this doStop method
916 // as this is handled in the prepareShutdown method which is also invoked when stopping a route
917 // and is better suited for preparing to shutdown than this doStop method is
918
919 if (recoverService != null) {
920 camelContext.getExecutorServiceManager().shutdownNow(recoverService);
921 }
922 ServiceHelper.stopServices(timeoutMap, processor, deadLetterProducerTemplate);
923
924 if (closedCorrelationKeys != null) {
925 // it may be a service so stop it as well
926 ServiceHelper.stopService(closedCorrelationKeys);
927 closedCorrelationKeys.clear();
928 }
929 batchConsumerCorrelationKeys.clear();
930 redeliveryState.clear();
931 }
932
933 @Override
934 public void prepareShutdown(boolean forced) {
935 // we are shutting down, so force completion if this option was enabled
936 // but only do this when forced=false, as that is when we have chance to
937 // send out new messages to be routed by Camel. When forced=true, then
938 // we have to shutdown in a hurry
939 if (!forced && forceCompletionOnStop) {
940 doForceCompletionOnStop();
941 }
942 }
943
944 private void doForceCompletionOnStop() {
945 int expected = forceCompletionOfAllGroups();
946
947 StopWatch watch = new StopWatch();
948 while (inProgressCompleteExchanges.size() > 0) {
949 LOG.trace("Waiting for {} inflight exchanges to complete", inProgressCompleteExchanges.size());
950 try {
951 Thread.sleep(100);
952 } catch (InterruptedException e) {
953 // break out as we got interrupted such as the JVM terminating
954 LOG.warn("Interrupted while waiting for {} inflight exchanges to complete.", inProgressCompleteExchanges.size());
955 break;
956 }
957 }
958
959 if (expected > 0) {
960 LOG.info("Forcing completion of all groups with {} exchanges completed in {}", expected, TimeUtils.printDuration(watch.stop()));
961 }
962 }
963
964 @Override
965 protected void doShutdown() throws Exception {
966 // shutdown aggregation repository
967 ServiceHelper.stopService(aggregationRepository);
968
969 // cleanup when shutting down
970 inProgressCompleteExchanges.clear();
971
972 if (shutdownExecutorService) {
973 camelContext.getExecutorServiceManager().shutdownNow(executorService);
974 }
975 if (shutdownTimeoutCheckerExecutorService) {
976 camelContext.getExecutorServiceManager().shutdownNow(timeoutCheckerExecutorService);
977 timeoutCheckerExecutorService = null;
978 }
979
980 super.doShutdown();
981 }
982
983 public int forceCompletionOfAllGroups() {
984
985 // only run if CamelContext has been fully started or is stopping
986 boolean allow = camelContext.getStatus().isStarted() || camelContext.getStatus().isStopping();
987 if (!allow) {
988 LOG.warn("Cannot start force completion of all groups because CamelContext({}) has not been started", camelContext.getName());
989 return 0;
990 }
991
992 LOG.trace("Starting force completion of all groups task");
993
994 // trigger completion for all in the repository
995 Set<String> keys = aggregationRepository.getKeys();
996
997 int total = 0;
998 if (keys != null && !keys.isEmpty()) {
999 // must acquire the shared aggregation lock to be able to trigger force completion
1000 lock.lock();
1001 total = keys.size();
1002 try {
1003 for (String key : keys) {
1004 Exchange exchange = aggregationRepository.get(camelContext, key);
1005 if (exchange != null) {
1006 LOG.trace("Force completion triggered for correlation key: {}", key);
1007 // indicate it was completed by a force completion request
1008 exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "forceCompletion");
1009 onCompletion(key, exchange, false);
1010 }
1011 }
1012 } finally {
1013 lock.unlock();
1014 }
1015 }
1016 LOG.trace("Completed force completion of all groups task");
1017
1018 if (total > 0) {
1019 LOG.debug("Forcing completion of all groups with {} exchanges", total);
1020 }
1021 return total;
1022 }
1023
1024 }