001/**
002 * Licensed to the Apache Software Foundation (ASF) under one or more
003 * contributor license agreements.  See the NOTICE file distributed with
004 * this work for additional information regarding copyright ownership.
005 * The ASF licenses this file to You under the Apache License, Version 2.0
006 * (the "License"); you may not use this file except in compliance with
007 * the License.  You may obtain a copy of the License at
008 *
009 *      http://www.apache.org/licenses/LICENSE-2.0
010 *
011 * Unless required by applicable law or agreed to in writing, software
012 * distributed under the License is distributed on an "AS IS" BASIS,
013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
014 * See the License for the specific language governing permissions and
015 * limitations under the License.
016 */
017package org.apache.camel.processor.aggregate;
018
019import java.util.ArrayList;
020import java.util.Collections;
021import java.util.LinkedHashSet;
022import java.util.List;
023import java.util.Map;
024import java.util.Set;
025import java.util.concurrent.ConcurrentHashMap;
026import java.util.concurrent.ConcurrentSkipListSet;
027import java.util.concurrent.ExecutorService;
028import java.util.concurrent.ScheduledExecutorService;
029import java.util.concurrent.TimeUnit;
030import java.util.concurrent.atomic.AtomicInteger;
031import java.util.concurrent.locks.Lock;
032import java.util.concurrent.locks.ReentrantLock;
033
034import org.apache.camel.AsyncCallback;
035import org.apache.camel.AsyncProcessor;
036import org.apache.camel.CamelContext;
037import org.apache.camel.CamelExchangeException;
038import org.apache.camel.Endpoint;
039import org.apache.camel.Exchange;
040import org.apache.camel.Expression;
041import org.apache.camel.Navigate;
042import org.apache.camel.NoSuchEndpointException;
043import org.apache.camel.Predicate;
044import org.apache.camel.Processor;
045import org.apache.camel.ProducerTemplate;
046import org.apache.camel.TimeoutMap;
047import org.apache.camel.Traceable;
048import org.apache.camel.spi.AggregationRepository;
049import org.apache.camel.spi.ExceptionHandler;
050import org.apache.camel.spi.OptimisticLockingAggregationRepository;
051import org.apache.camel.spi.RecoverableAggregationRepository;
052import org.apache.camel.spi.ShutdownPrepared;
053import org.apache.camel.spi.Synchronization;
054import org.apache.camel.support.DefaultTimeoutMap;
055import org.apache.camel.support.LoggingExceptionHandler;
056import org.apache.camel.support.ServiceSupport;
057import org.apache.camel.util.AsyncProcessorHelper;
058import org.apache.camel.util.ExchangeHelper;
059import org.apache.camel.util.LRUCache;
060import org.apache.camel.util.ObjectHelper;
061import org.apache.camel.util.ServiceHelper;
062import org.apache.camel.util.StopWatch;
063import org.apache.camel.util.TimeUtils;
064import org.slf4j.Logger;
065import org.slf4j.LoggerFactory;
066
067/**
068 * An implementation of the <a
069 * href="http://camel.apache.org/aggregator2.html">Aggregator</a>
070 * pattern where a batch of messages are processed (up to a maximum amount or
071 * until some timeout is reached) and messages for the same correlation key are
072 * combined together using some kind of {@link AggregationStrategy}
073 * (by default the latest message is used) to compress many message exchanges
074 * into a smaller number of exchanges.
075 * <p/>
076 * A good example of this is stock market data; you may be receiving 30,000
077 * messages/second and you may want to throttle it right down so that multiple
078 * messages for the same stock are combined (or just the latest message is used
079 * and older prices are discarded). Another idea is to combine line item messages
080 * together into a single invoice message.
081 */
082public class AggregateProcessor extends ServiceSupport implements AsyncProcessor, Navigate<Processor>, Traceable, ShutdownPrepared {
083
    // name used for the background thread that checks for completion timeouts
    public static final String AGGREGATE_TIMEOUT_CHECKER = "AggregateTimeoutChecker";

    private static final Logger LOG = LoggerFactory.getLogger(AggregateProcessor.class);

    // guards non-optimistic aggregation so only one exchange is aggregated at a time (see doProcess)
    private final Lock lock = new ReentrantLock();
    private final CamelContext camelContext;
    // the downstream processor that receives the completed (aggregated) exchange
    private final Processor processor;
    private AggregationStrategy aggregationStrategy;
    // expression used to compute the correlation key for an incoming exchange
    private Expression correlationExpression;
    // executor used to send out completed exchanges asynchronously (see onSubmitCompletion)
    private final ExecutorService executorService;
    // whether this processor owns (and must shut down) the executor service
    private final boolean shutdownExecutorService;
    private OptimisticLockRetryPolicy optimisticLockRetryPolicy = new OptimisticLockRetryPolicy();
    private ScheduledExecutorService timeoutCheckerExecutorService;
    private boolean shutdownTimeoutCheckerExecutorService;
    private ScheduledExecutorService recoverService;
    // store correlation key -> exchange id in timeout map
    private TimeoutMap<String, String> timeoutMap;
    private ExceptionHandler exceptionHandler;
    // repository storing in-progress (partly aggregated) exchanges by correlation key
    private AggregationRepository aggregationRepository;
    // correlation keys that have completed and are closed for further aggregation
    // (only populated when closeCorrelationKeyOnCompletion is configured — see doProcess/onCompletion)
    private Map<String, String> closedCorrelationKeys;
    // correlation keys seen for the current batch (completionFromBatchConsumer mode)
    private final Set<String> batchConsumerCorrelationKeys = new ConcurrentSkipListSet<String>();
    // ids of completed exchanges currently being processed downstream (added in onSubmitCompletion;
    // presumably consulted/cleared by shutdown and completion logic not visible in this chunk)
    private final Set<String> inProgressCompleteExchanges = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
    // exchange id -> redelivery bookkeeping (entries removed when a discarded exchange is confirmed)
    private final Map<String, RedeliveryData> redeliveryState = new ConcurrentHashMap<String, RedeliveryData>();
107
    // keeps bookkeeping about redelivery of recovered exchanges
    private class RedeliveryData {
        // number of times the exchange has been redelivered so far
        int redeliveryCounter;
    }
112
    // options
    private boolean ignoreInvalidCorrelationKeys;
    // when set, completed keys are remembered and late exchanges are rejected (see doProcess);
    // the value presumably bounds the closed-key cache size — confirm in doStart (not visible here)
    private Integer closeCorrelationKeyOnCompletion;
    private boolean parallelProcessing;
    // when enabled the repository must be an OptimisticLockingAggregationRepository (see doProcess)
    private boolean optimisticLocking;

    // different ways to have completion triggered
    // whether completion is checked on the incoming exchange (eager) or on the aggregated result
    private boolean eagerCheckCompletion;
    private Predicate completionPredicate;
    private long completionTimeout;
    // per-exchange timeout expression; takes precedence over the fixed completionTimeout
    private Expression completionTimeoutExpression;
    private long completionInterval;
    private int completionSize;
    // per-exchange size expression; takes precedence over the fixed completionSize
    private Expression completionSizeExpression;
    private boolean completionFromBatchConsumer;
    // counts exchanges received for the current batch (completionFromBatchConsumer mode)
    private AtomicInteger batchConsumerCounter = new AtomicInteger();
    private boolean discardOnCompletionTimeout;
    private boolean forceCompletionOnStop;

    // template presumably used to send recovered exchanges to a dead letter endpoint — usage not visible in this chunk
    private ProducerTemplate deadLetterProducerTemplate;
133
    /**
     * Creates the aggregate processor.
     * <p/>
     * Note: the order of the null checks determines which argument is reported first if
     * several are <tt>null</tt>.
     *
     * @param camelContext            the camel context (must not be null)
     * @param processor               the downstream processor receiving completed exchanges (must not be null)
     * @param correlationExpression   expression computing the correlation key (must not be null)
     * @param aggregationStrategy     strategy used to combine exchanges (must not be null)
     * @param executorService         executor used to send out completed exchanges (must not be null)
     * @param shutdownExecutorService whether this processor owns (and must shut down) the executor service
     */
    public AggregateProcessor(CamelContext camelContext, Processor processor,
                              Expression correlationExpression, AggregationStrategy aggregationStrategy,
                              ExecutorService executorService, boolean shutdownExecutorService) {
        ObjectHelper.notNull(camelContext, "camelContext");
        ObjectHelper.notNull(processor, "processor");
        ObjectHelper.notNull(correlationExpression, "correlationExpression");
        ObjectHelper.notNull(aggregationStrategy, "aggregationStrategy");
        ObjectHelper.notNull(executorService, "executorService");
        this.camelContext = camelContext;
        this.processor = processor;
        this.correlationExpression = correlationExpression;
        this.aggregationStrategy = aggregationStrategy;
        this.executorService = executorService;
        this.shutdownExecutorService = shutdownExecutorService;
        // default exception handler just logs errors from processing completed exchanges
        this.exceptionHandler = new LoggingExceptionHandler(camelContext, getClass());
    }
150
151    @Override
152    public String toString() {
153        return "AggregateProcessor[to: " + processor + "]";
154    }
155
156    public String getTraceLabel() {
157        return "aggregate[" + correlationExpression + "]";
158    }
159
160    public List<Processor> next() {
161        if (!hasNext()) {
162            return null;
163        }
164        List<Processor> answer = new ArrayList<Processor>(1);
165        answer.add(processor);
166        return answer;
167    }
168
    /**
     * Returns whether there is a child processor to navigate to.
     */
    public boolean hasNext() {
        return processor != null;
    }
172
    /**
     * Processes the exchange synchronously by delegating to the asynchronous variant.
     */
    public void process(Exchange exchange) throws Exception {
        AsyncProcessorHelper.process(this, exchange);
    }
176
    /**
     * Processes the exchange. Aggregation itself is always performed synchronously here, so the
     * callback is invoked before returning and <tt>true</tt> (done synchronously) is returned.
     */
    public boolean process(Exchange exchange, AsyncCallback callback) {
        try {
            doProcess(exchange);
        } catch (Throwable e) {
            // store the failure on the exchange so the caller/error handler can deal with it
            exchange.setException(e);
        }
        callback.done(true);
        return true;
    }
186
    /**
     * Aggregates the incoming exchange with any existing exchange for the same correlation key.
     * <p/>
     * The special headers {@link Exchange#AGGREGATION_COMPLETE_ALL_GROUPS} and
     * {@link Exchange#AGGREGATION_COMPLETE_ALL_GROUPS_INCLUSIVE} force completion of all current
     * groups, exclusive respectively inclusive of the incoming exchange.
     *
     * @param exchange the incoming exchange
     * @throws Exception if the correlation key is invalid (and invalid keys are not ignored),
     *                   if the key has already been closed, if optimistic locking retries are
     *                   exhausted, or if the aggregation itself fails
     */
    protected void doProcess(Exchange exchange) throws Exception {

        //check for the special header to force completion of all groups (and ignore the exchange otherwise)
        boolean completeAllGroups = exchange.getIn().getHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS, false, boolean.class);
        if (completeAllGroups) {
            forceCompletionOfAllGroups();
            return;
        }

        // compute correlation expression
        String key = correlationExpression.evaluate(exchange, String.class);
        if (ObjectHelper.isEmpty(key)) {
            // we have a bad correlation key
            if (isIgnoreInvalidCorrelationKeys()) {
                LOG.debug("Invalid correlation key. This Exchange will be ignored: {}", exchange);
                return;
            } else {
                throw new CamelExchangeException("Invalid correlation key", exchange);
            }
        }

        // is the correlation key closed?
        if (closedCorrelationKeys != null && closedCorrelationKeys.containsKey(key)) {
            throw new ClosedCorrelationKeyException(key, exchange);
        }

        // when optimist locking is enabled we keep trying until we succeed
        if (optimisticLocking) {
            List<Exchange> aggregated = null;
            boolean exhaustedRetries = true;
            int attempt = 0;
            do {
                attempt++;
                // copy exchange, and do not share the unit of work
                // the aggregated output runs in another unit of work
                Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);
                try {
                    aggregated = doAggregation(key, copy);
                    exhaustedRetries = false;
                    break;
                } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
                    LOG.trace("On attempt {} OptimisticLockingAggregationRepository: {} threw OptimisticLockingException while trying to add() key: {} and exchange: {}",
                              new Object[]{attempt, aggregationRepository, key, copy, e});
                    // back off according to the retry policy before trying again
                    optimisticLockRetryPolicy.doDelay(attempt);
                }
            } while (optimisticLockRetryPolicy.shouldRetry(attempt));

            if (exhaustedRetries) {
                throw new CamelExchangeException("Exhausted optimistic locking retry attempts, tried " + attempt + " times", exchange,
                        new OptimisticLockingAggregationRepository.OptimisticLockingException());
            } else if (aggregated != null) {
                // we are completed so submit to completion
                for (Exchange agg : aggregated) {
                    onSubmitCompletion(key, agg);
                }
            }
        } else {
            // copy exchange, and do not share the unit of work
            // the aggregated output runs in another unit of work
            Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);

            // when memory based then its fast using synchronized, but if the aggregation repository is IO
            // bound such as JPA etc then concurrent aggregation per correlation key could
            // improve performance as we can run aggregation repository get/add in parallel
            List<Exchange> aggregated = null;
            lock.lock();
            try {
                aggregated = doAggregation(key, copy);
            } finally {
                lock.unlock();
            }

            // we are completed so do that work outside the lock
            if (aggregated != null) {
                for (Exchange agg : aggregated) {
                    onSubmitCompletion(key, agg);
                }
            }
        }

        // check for the special header to force completion of all groups (inclusive of the message)
        boolean completeAllGroupsInclusive = exchange.getIn().getHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS_INCLUSIVE, false, boolean.class);
        if (completeAllGroupsInclusive) {
            forceCompletionOfAllGroups();
        }
    }
273
274    /**
275     * Aggregates the exchange with the given correlation key
276     * <p/>
277     * This method <b>must</b> be run synchronized as we cannot aggregate the same correlation key
278     * in parallel.
279     * <p/>
280     * The returned {@link Exchange} should be send downstream using the {@link #onSubmitCompletion(String, org.apache.camel.Exchange)}
281     * method which sends out the aggregated and completed {@link Exchange}.
282     *
283     * @param key      the correlation key
284     * @param newExchange the exchange
285     * @return the aggregated exchange(s) which is complete, or <tt>null</tt> if not yet complete
286     * @throws org.apache.camel.CamelExchangeException is thrown if error aggregating
287     */
288    private List<Exchange> doAggregation(String key, Exchange newExchange) throws CamelExchangeException {
289        LOG.trace("onAggregation +++ start +++ with correlation key: {}", key);
290
291        Exchange answer;
292        Exchange originalExchange = aggregationRepository.get(newExchange.getContext(), key);
293        Exchange oldExchange = originalExchange;
294
295        Integer size = 1;
296        if (oldExchange != null) {
297            // hack to support legacy AggregationStrategy's that modify and return the oldExchange, these will not
298            // working when using an identify based approach for optimistic locking like the MemoryAggregationRepository.
299            if (optimisticLocking && aggregationRepository instanceof MemoryAggregationRepository) {
300                oldExchange = originalExchange.copy();
301            }
302            size = oldExchange.getProperty(Exchange.AGGREGATED_SIZE, 0, Integer.class);
303            size++;
304        }
305
306        // check if we are complete
307        String complete = null;
308        if (isEagerCheckCompletion()) {
309            // put the current aggregated size on the exchange so its avail during completion check
310            newExchange.setProperty(Exchange.AGGREGATED_SIZE, size);
311            complete = isCompleted(key, newExchange);
312            // remove it afterwards
313            newExchange.removeProperty(Exchange.AGGREGATED_SIZE);
314        }
315
316        // prepare the exchanges for aggregation and then aggregate them
317        ExchangeHelper.prepareAggregation(oldExchange, newExchange);
318        // must catch any exception from aggregation
319        try {
320            answer = onAggregation(oldExchange, newExchange);
321        } catch (Throwable e) {
322            throw new CamelExchangeException("Error occurred during aggregation", newExchange, e);
323        }
324        if (answer == null) {
325            throw new CamelExchangeException("AggregationStrategy " + aggregationStrategy + " returned null which is not allowed", newExchange);
326        }
327
328        // update the aggregated size
329        answer.setProperty(Exchange.AGGREGATED_SIZE, size);
330
331        // maybe we should check completion after the aggregation
332        if (!isEagerCheckCompletion()) {
333            complete = isCompleted(key, answer);
334        }
335
336        List<Exchange> list = new ArrayList<Exchange>();
337
338        // only need to update aggregation repository if we are not complete
339        if (complete == null) {
340            doAggregationRepositoryAdd(newExchange.getContext(), key, originalExchange, answer);
341            // we are not complete so the answer should be null
342            answer = null;
343        } else {
344            // if batch consumer completion is enabled then we need to complete the group
345            if ("consumer".equals(complete)) {
346                for (String batchKey : batchConsumerCorrelationKeys) {
347                    Exchange batchAnswer;
348                    if (batchKey.equals(key)) {
349                        // skip the current aggregated key as we have already aggregated it and have the answer
350                        batchAnswer = answer;
351                    } else {
352                        batchAnswer = aggregationRepository.get(camelContext, batchKey);
353                    }
354
355                    if (batchAnswer != null) {
356                        batchAnswer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, complete);
357                        onCompletion(batchKey, originalExchange, batchAnswer, false);
358                        list.add(batchAnswer);
359                    }
360                }
361                batchConsumerCorrelationKeys.clear();
362                // we have already submitted to completion, so answer should be null
363                answer = null;
364            } else {
365                // we are complete for this exchange
366                answer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, complete);
367                answer = onCompletion(key, originalExchange, answer, false);
368            }
369        }
370
371        LOG.trace("onAggregation +++  end  +++ with correlation key: {}", key);
372        if (answer != null) {
373            list.add(answer);
374        }
375        return list;
376    }
377
378    protected void doAggregationRepositoryAdd(CamelContext camelContext, String key, Exchange oldExchange, Exchange newExchange) {
379        LOG.trace("In progress aggregated oldExchange: {}, newExchange: {} with correlation key: {}", new Object[]{oldExchange, newExchange, key});
380        if (optimisticLocking) {
381            try {
382                ((OptimisticLockingAggregationRepository)aggregationRepository).add(camelContext, key, oldExchange, newExchange);
383            } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
384                onOptimisticLockingFailure(oldExchange, newExchange);
385                throw e;
386            }
387        } else {
388            aggregationRepository.add(camelContext, key, newExchange);
389        }
390    }
391
392    protected void onOptimisticLockingFailure(Exchange oldExchange, Exchange newExchange) {
393        if (aggregationStrategy instanceof OptimisticLockingAwareAggregationStrategy) {
394            LOG.trace("onOptimisticLockFailure with AggregationStrategy: {}, oldExchange: {}, newExchange: {}",
395                      new Object[]{aggregationStrategy, oldExchange, newExchange});
396            ((OptimisticLockingAwareAggregationStrategy)aggregationStrategy).onOptimisticLockFailure(oldExchange, newExchange);
397        }
398    }
399
400    /**
401     * Tests whether the given exchange is complete or not
402     *
403     * @param key      the correlation key
404     * @param exchange the incoming exchange
405     * @return <tt>null</tt> if not completed, otherwise a String with the type that triggered the completion
406     */
407    protected String isCompleted(String key, Exchange exchange) {
408        // batch consumer completion must always run first
409        if (isCompletionFromBatchConsumer()) {
410            batchConsumerCorrelationKeys.add(key);
411            batchConsumerCounter.incrementAndGet();
412            int size = exchange.getProperty(Exchange.BATCH_SIZE, 0, Integer.class);
413            if (size > 0 && batchConsumerCounter.intValue() >= size) {
414                // batch consumer is complete then reset the counter
415                batchConsumerCounter.set(0);
416                return "consumer";
417            }
418        }
419
420        if (exchange.getProperty(Exchange.AGGREGATION_COMPLETE_CURRENT_GROUP, false, boolean.class)) {
421            return "strategy";
422        }
423
424        if (getCompletionPredicate() != null) {
425            boolean answer = getCompletionPredicate().matches(exchange);
426            if (answer) {
427                return "predicate";
428            }
429        }
430
431        boolean sizeChecked = false;
432        if (getCompletionSizeExpression() != null) {
433            Integer value = getCompletionSizeExpression().evaluate(exchange, Integer.class);
434            if (value != null && value > 0) {
435                // mark as already checked size as expression takes precedence over static configured
436                sizeChecked = true;
437                int size = exchange.getProperty(Exchange.AGGREGATED_SIZE, 1, Integer.class);
438                if (size >= value) {
439                    return "size";
440                }
441            }
442        }
443        if (!sizeChecked && getCompletionSize() > 0) {
444            int size = exchange.getProperty(Exchange.AGGREGATED_SIZE, 1, Integer.class);
445            if (size >= getCompletionSize()) {
446                return "size";
447            }
448        }
449
450        // timeout can be either evaluated based on an expression or from a fixed value
451        // expression takes precedence
452        boolean timeoutSet = false;
453        if (getCompletionTimeoutExpression() != null) {
454            Long value = getCompletionTimeoutExpression().evaluate(exchange, Long.class);
455            if (value != null && value > 0) {
456                if (LOG.isTraceEnabled()) {
457                    LOG.trace("Updating correlation key {} to timeout after {} ms. as exchange received: {}",
458                            new Object[]{key, value, exchange});
459                }
460                addExchangeToTimeoutMap(key, exchange, value);
461                timeoutSet = true;
462            }
463        }
464        if (!timeoutSet && getCompletionTimeout() > 0) {
465            // timeout is used so use the timeout map to keep an eye on this
466            if (LOG.isTraceEnabled()) {
467                LOG.trace("Updating correlation key {} to timeout after {} ms. as exchange received: {}",
468                        new Object[]{key, getCompletionTimeout(), exchange});
469            }
470            addExchangeToTimeoutMap(key, exchange, getCompletionTimeout());
471        }
472
473        // not complete
474        return null;
475    }
476
    /**
     * Delegates combining the two exchanges to the configured {@link AggregationStrategy}.
     *
     * @param oldExchange the previously aggregated exchange, or <tt>null</tt> on the first exchange
     * @param newExchange the incoming exchange
     * @return the combined exchange
     */
    protected Exchange onAggregation(Exchange oldExchange, Exchange newExchange) {
        return aggregationStrategy.aggregate(oldExchange, newExchange);
    }
480
    /**
     * Invoked when an aggregated exchange is complete: removes it from the repository and the
     * timeout map, records the closed key (if enabled), and decides whether the exchange should
     * be published downstream or discarded.
     *
     * @param key         the correlation key
     * @param original    the exchange previously stored in the repository, or <tt>null</tt> if the
     *                    group completed with only a single exchange
     * @param aggregated  the aggregated exchange
     * @param fromTimeout whether completion was triggered by the timeout checker
     * @return the exchange to send downstream, or <tt>null</tt> if it was discarded
     */
    protected Exchange onCompletion(final String key, final Exchange original, final Exchange aggregated, boolean fromTimeout) {
        // store the correlation key as property before we remove so the repository has that information
        if (original != null) {
            original.setProperty(Exchange.AGGREGATED_CORRELATION_KEY, key);
        }
        aggregated.setProperty(Exchange.AGGREGATED_CORRELATION_KEY, key);

        // only remove if we have previous added (as we could potentially complete with only 1 exchange)
        // (if we have previous added then we have that as the original exchange)
        if (original != null) {
            // remove from repository as its completed, we do this first as to trigger any OptimisticLockingException's
            aggregationRepository.remove(aggregated.getContext(), key, original);
        }

        if (!fromTimeout && timeoutMap != null) {
            // cleanup timeout map if it was a incoming exchange which triggered the timeout (and not the timeout checker)
            timeoutMap.remove(key);
        }

        // this key has been closed so add it to the closed map
        if (closedCorrelationKeys != null) {
            closedCorrelationKeys.put(key, key);
        }

        if (fromTimeout) {
            // invoke timeout if its timeout aware aggregation strategy,
            // to allow any custom processing before discarding the exchange
            if (aggregationStrategy instanceof TimeoutAwareAggregationStrategy) {
                long timeout = getCompletionTimeout() > 0 ? getCompletionTimeout() : -1;
                // index and total are unknown at this point, hence -1
                ((TimeoutAwareAggregationStrategy) aggregationStrategy).timeout(aggregated, -1, -1, timeout);
            }
        }

        Exchange answer;
        if (fromTimeout && isDiscardOnCompletionTimeout()) {
            // discard due timeout
            LOG.debug("Aggregation for correlation key {} discarding aggregated exchange: {}", key, aggregated);
            // must confirm the discarded exchange
            aggregationRepository.confirm(aggregated.getContext(), aggregated.getExchangeId());
            // and remove redelivery state as well
            redeliveryState.remove(aggregated.getExchangeId());
            // the completion was from timeout and we should just discard it
            answer = null;
        } else {
            // the aggregated exchange should be published (sent out)
            answer = aggregated;
        }

        return answer;
    }
531
    /**
     * Submits the completed aggregated exchange to the executor service, which sends it
     * downstream to the configured processor; processing failures are handed to the
     * configured exception handler.
     *
     * @param key      the correlation key
     * @param exchange the completed aggregated exchange
     */
    private void onSubmitCompletion(final String key, final Exchange exchange) {
        LOG.debug("Aggregation complete for correlation key {} sending aggregated exchange: {}", key, exchange);

        // add this as in progress before we submit the task
        inProgressCompleteExchanges.add(exchange.getExchangeId());

        // invoke the on completion callback
        if (aggregationStrategy instanceof CompletionAwareAggregationStrategy) {
            ((CompletionAwareAggregationStrategy) aggregationStrategy).onCompletion(exchange);
        }

        // send this exchange
        executorService.submit(new Runnable() {
            public void run() {
                LOG.debug("Processing aggregated exchange: {}", exchange);

                // add on completion task so we remember to update the inProgressCompleteExchanges
                exchange.addOnCompletion(new AggregateOnCompletion(exchange.getExchangeId()));

                try {
                    processor.process(exchange);
                } catch (Throwable e) {
                    // store the failure on the exchange so it is reported below
                    exchange.setException(e);
                }

                // log exception if there was a problem
                if (exchange.getException() != null) {
                    // if there was an exception then let the exception handler handle it
                    getExceptionHandler().handleException("Error processing aggregated exchange", exchange, exchange.getException());
                } else {
                    LOG.trace("Processing aggregated exchange: {} complete.", exchange);
                }
            }
        });
    }
567
568    /**
569     * Restores the timeout map with timeout values from the aggregation repository.
570     * <p/>
571     * This is needed in case the aggregator has been stopped and started again (for example a server restart).
572     * Then the existing exchanges from the {@link AggregationRepository} must have their timeout conditions restored.
573     */
574    protected void restoreTimeoutMapFromAggregationRepository() throws Exception {
575        // grab the timeout value for each partly aggregated exchange
576        Set<String> keys = aggregationRepository.getKeys();
577        if (keys == null || keys.isEmpty()) {
578            return;
579        }
580
581        StopWatch watch = new StopWatch();
582        LOG.trace("Starting restoring CompletionTimeout for {} existing exchanges from the aggregation repository...", keys.size());
583
584        for (String key : keys) {
585            Exchange exchange = aggregationRepository.get(camelContext, key);
586            // grab the timeout value
587            long timeout = exchange.hasProperties() ? exchange.getProperty(Exchange.AGGREGATED_TIMEOUT, 0, long.class) : 0;
588            if (timeout > 0) {
589                LOG.trace("Restoring CompletionTimeout for exchangeId: {} with timeout: {} millis.", exchange.getExchangeId(), timeout);
590                addExchangeToTimeoutMap(key, exchange, timeout);
591            }
592        }
593
594        // log duration of this task so end user can see how long it takes to pre-check this upon starting
595        LOG.info("Restored {} CompletionTimeout conditions in the AggregationTimeoutChecker in {}",
596                timeoutMap.size(), TimeUtils.printDuration(watch.stop()));
597    }
598
599    /**
600     * Adds the given exchange to the timeout map, which is used by the timeout checker task to trigger timeouts.
601     *
602     * @param key      the correlation key
603     * @param exchange the exchange
604     * @param timeout  the timeout value in millis
605     */
606    private void addExchangeToTimeoutMap(String key, Exchange exchange, long timeout) {
607        // store the timeout value on the exchange as well, in case we need it later
608        exchange.setProperty(Exchange.AGGREGATED_TIMEOUT, timeout);
609        timeoutMap.put(key, exchange.getExchangeId(), timeout);
610    }
611
    public Predicate getCompletionPredicate() {
        return completionPredicate;
    }

    // predicate which triggers completion of a group when it matches the exchange
    public void setCompletionPredicate(Predicate completionPredicate) {
        this.completionPredicate = completionPredicate;
    }

    public boolean isEagerCheckCompletion() {
        return eagerCheckCompletion;
    }

    // whether completion is checked on the incoming exchange (eager) or on the aggregated result
    public void setEagerCheckCompletion(boolean eagerCheckCompletion) {
        this.eagerCheckCompletion = eagerCheckCompletion;
    }

    public long getCompletionTimeout() {
        return completionTimeout;
    }

    // fixed inactivity timeout in millis after which a group completes (0 = disabled)
    public void setCompletionTimeout(long completionTimeout) {
        this.completionTimeout = completionTimeout;
    }

    public Expression getCompletionTimeoutExpression() {
        return completionTimeoutExpression;
    }

    // per-exchange timeout expression; takes precedence over the fixed timeout (see isCompleted)
    public void setCompletionTimeoutExpression(Expression completionTimeoutExpression) {
        this.completionTimeoutExpression = completionTimeoutExpression;
    }

    public long getCompletionInterval() {
        return completionInterval;
    }

    // fixed interval in millis for triggering completion (presumably polled by a scheduled task
    // configured in doStart — not visible in this chunk)
    public void setCompletionInterval(long completionInterval) {
        this.completionInterval = completionInterval;
    }

    public int getCompletionSize() {
        return completionSize;
    }

    // number of aggregated exchanges that completes a group (0 = disabled)
    public void setCompletionSize(int completionSize) {
        this.completionSize = completionSize;
    }

    public Expression getCompletionSizeExpression() {
        return completionSizeExpression;
    }

    // per-exchange size expression; takes precedence over the fixed size (see isCompleted)
    public void setCompletionSizeExpression(Expression completionSizeExpression) {
        this.completionSizeExpression = completionSizeExpression;
    }

    public boolean isIgnoreInvalidCorrelationKeys() {
        return ignoreInvalidCorrelationKeys;
    }

    // whether exchanges with an empty correlation key are silently ignored instead of failing
    public void setIgnoreInvalidCorrelationKeys(boolean ignoreInvalidCorrelationKeys) {
        this.ignoreInvalidCorrelationKeys = ignoreInvalidCorrelationKeys;
    }

    public Integer getCloseCorrelationKeyOnCompletion() {
        return closeCorrelationKeyOnCompletion;
    }

    // when set, completed keys are closed and late exchanges rejected with ClosedCorrelationKeyException
    public void setCloseCorrelationKeyOnCompletion(Integer closeCorrelationKeyOnCompletion) {
        this.closeCorrelationKeyOnCompletion = closeCorrelationKeyOnCompletion;
    }

    public boolean isCompletionFromBatchConsumer() {
        return completionFromBatchConsumer;
    }

    // whether completion is driven by a batch consumer (uses the Exchange.BATCH_SIZE property)
    public void setCompletionFromBatchConsumer(boolean completionFromBatchConsumer) {
        this.completionFromBatchConsumer = completionFromBatchConsumer;
    }

    public ExceptionHandler getExceptionHandler() {
        return exceptionHandler;
    }

    // handler invoked when processing a completed aggregated exchange fails (defaults to logging)
    public void setExceptionHandler(ExceptionHandler exceptionHandler) {
        this.exceptionHandler = exceptionHandler;
    }

    public boolean isParallelProcessing() {
        return parallelProcessing;
    }

    public void setParallelProcessing(boolean parallelProcessing) {
        this.parallelProcessing = parallelProcessing;
    }

    public boolean isOptimisticLocking() {
        return optimisticLocking;
    }

    // requires the configured repository to implement OptimisticLockingAggregationRepository
    public void setOptimisticLocking(boolean optimisticLocking) {
        this.optimisticLocking = optimisticLocking;
    }

    public AggregationRepository getAggregationRepository() {
        return aggregationRepository;
    }

    // repository storing in-progress (partly aggregated) exchanges by correlation key
    public void setAggregationRepository(AggregationRepository aggregationRepository) {
        this.aggregationRepository = aggregationRepository;
    }

    public boolean isDiscardOnCompletionTimeout() {
        return discardOnCompletionTimeout;
    }

    // whether groups completed by timeout are discarded instead of being sent downstream
    public void setDiscardOnCompletionTimeout(boolean discardOnCompletionTimeout) {
        this.discardOnCompletionTimeout = discardOnCompletionTimeout;
    }

    // whether all current groups should be completed when this processor stops
    public void setForceCompletionOnStop(boolean forceCompletionOnStop) {
        this.forceCompletionOnStop = forceCompletionOnStop;
    }
735
736    public void setTimeoutCheckerExecutorService(ScheduledExecutorService timeoutCheckerExecutorService) {
737        this.timeoutCheckerExecutorService = timeoutCheckerExecutorService;
738    }
739
740    public ScheduledExecutorService getTimeoutCheckerExecutorService() {
741        return timeoutCheckerExecutorService;
742    }
743
744    public boolean isShutdownTimeoutCheckerExecutorService() {
745        return shutdownTimeoutCheckerExecutorService;
746    }
747
748    public void setShutdownTimeoutCheckerExecutorService(boolean shutdownTimeoutCheckerExecutorService) {
749        this.shutdownTimeoutCheckerExecutorService = shutdownTimeoutCheckerExecutorService;
750    }
751
752    public void setOptimisticLockRetryPolicy(OptimisticLockRetryPolicy optimisticLockRetryPolicy) {
753        this.optimisticLockRetryPolicy = optimisticLockRetryPolicy;
754    }
755
756    public OptimisticLockRetryPolicy getOptimisticLockRetryPolicy() {
757        return optimisticLockRetryPolicy;
758    }
759
760    public AggregationStrategy getAggregationStrategy() {
761        return aggregationStrategy;
762    }
763
764    public void setAggregationStrategy(AggregationStrategy aggregationStrategy) {
765        this.aggregationStrategy = aggregationStrategy;
766    }
767
768    public Expression getCorrelationExpression() {
769        return correlationExpression;
770    }
771
772    public void setCorrelationExpression(Expression correlationExpression) {
773        this.correlationExpression = correlationExpression;
774    }
775
776    /**
777     * On completion task which keeps the booking of the in progress up to date
778     */
    /**
     * On completion task which keeps the booking of the in progress up to date
     */
    private final class AggregateOnCompletion implements Synchronization {
        // captured at submission time; used as the key into
        // inProgressCompleteExchanges and redeliveryState
        private final String exchangeId;

        private AggregateOnCompletion(String exchangeId) {
            // must use the original exchange id as it could potentially change if send over SEDA etc.
            this.exchangeId = exchangeId;
        }

        public void onFailure(Exchange exchange) {
            LOG.trace("Aggregated exchange onFailure: {}", exchange);

            // must remember to remove in progress when we failed
            inProgressCompleteExchanges.remove(exchangeId);
            // do not remove redelivery state as we need it when we redeliver again later
        }

        public void onComplete(Exchange exchange) {
            LOG.trace("Aggregated exchange onComplete: {}", exchange);

            // only confirm if we processed without a problem
            try {
                // confirm in the repository before removing from in-progress: RecoverTask
                // skips in-progress exchanges, so this order prevents a completed exchange
                // from being recovered (and redelivered) again
                aggregationRepository.confirm(exchange.getContext(), exchangeId);
                // and remove redelivery state as well
                redeliveryState.remove(exchangeId);
            } finally {
                // must remember to remove in progress when we are complete
                inProgressCompleteExchanges.remove(exchangeId);
            }
        }

        @Override
        public String toString() {
            return "AggregateOnCompletion";
        }
    }
814
815    /**
816     * Background task that looks for aggregated exchanges which is triggered by completion timeouts.
817     */
818    private final class AggregationTimeoutMap extends DefaultTimeoutMap<String, String> {
819
820        private AggregationTimeoutMap(ScheduledExecutorService executor, long requestMapPollTimeMillis) {
821            // do NOT use locking on the timeout map as this aggregator has its own shared lock we will use instead
822            super(executor, requestMapPollTimeMillis, optimisticLocking);
823        }
824
825        @Override
826        public void purge() {
827            // must acquire the shared aggregation lock to be able to purge
828            if (!optimisticLocking) { lock.lock(); }
829            try {
830                super.purge();
831            } finally {
832                if (!optimisticLocking) { lock.unlock(); }
833            }
834        }
835
836        @Override
837        public boolean onEviction(String key, String exchangeId) {
838            log.debug("Completion timeout triggered for correlation key: {}", key);
839
840            boolean inProgress = inProgressCompleteExchanges.contains(exchangeId);
841            if (inProgress) {
842                LOG.trace("Aggregated exchange with id: {} is already in progress.", exchangeId);
843                return true;
844            }
845
846            // get the aggregated exchange
847            boolean evictionStolen = false;
848            Exchange answer = aggregationRepository.get(camelContext, key);
849            if (answer == null) {
850                evictionStolen = true;
851            } else {
852                // indicate it was completed by timeout
853                answer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "timeout");
854                try {
855                    answer = onCompletion(key, answer, answer, true);
856                    if (answer != null) {
857                        onSubmitCompletion(key, answer);
858                    }
859                } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
860                    evictionStolen = true;
861                }
862            }
863
864            if (optimisticLocking && evictionStolen) {
865                LOG.debug("Another Camel instance has already successfully correlated or processed this timeout eviction "
866                          + "for exchange with id: {} and correlation id: {}", exchangeId, key);
867            }
868            return true;
869        }
870    }
871
872    /**
873     * Background task that triggers completion based on interval.
874     */
875    private final class AggregationIntervalTask implements Runnable {
876
877        public void run() {
878            // only run if CamelContext has been fully started
879            if (!camelContext.getStatus().isStarted()) {
880                LOG.trace("Completion interval task cannot start due CamelContext({}) has not been started yet", camelContext.getName());
881                return;
882            }
883
884            LOG.trace("Starting completion interval task");
885
886            // trigger completion for all in the repository
887            Set<String> keys = aggregationRepository.getKeys();
888
889            if (keys != null && !keys.isEmpty()) {
890                // must acquire the shared aggregation lock to be able to trigger interval completion
891                if (!optimisticLocking) { lock.lock(); }
892                try {
893                    for (String key : keys) {
894                        boolean stolenInterval = false;
895                        Exchange exchange = aggregationRepository.get(camelContext, key);
896                        if (exchange == null) {
897                            stolenInterval = true;
898                        } else {
899                            LOG.trace("Completion interval triggered for correlation key: {}", key);
900                            // indicate it was completed by interval
901                            exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "interval");
902                            try {
903                                Exchange answer = onCompletion(key, exchange, exchange, false);
904                                if (answer != null) {
905                                    onSubmitCompletion(key, answer);
906                                }
907                            } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
908                                stolenInterval = true;
909                            }
910                        }
911                        if (optimisticLocking && stolenInterval) {
912                            LOG.debug("Another Camel instance has already processed this interval aggregation for exchange with correlation id: {}", key);
913                        }
914                    }
915                } finally {
916                    if (!optimisticLocking) { lock.unlock(); }
917                }
918            }
919
920            LOG.trace("Completion interval task complete");
921        }
922    }
923
924    /**
925     * Background task that looks for aggregated exchanges to recover.
926     */
    /**
     * Background task that looks for aggregated exchanges to recover.
     * Scans the recoverable repository, skips exchanges already in progress,
     * and either resubmits a recovered exchange (incrementing its redelivery
     * counter) or moves it to the dead letter channel once the maximum number
     * of redeliveries has been exhausted.
     */
    private final class RecoverTask implements Runnable {
        private final RecoverableAggregationRepository recoverable;

        private RecoverTask(RecoverableAggregationRepository recoverable) {
            this.recoverable = recoverable;
        }

        public void run() {
            // only run if CamelContext has been fully started
            if (!camelContext.getStatus().isStarted()) {
                LOG.trace("Recover check cannot start due CamelContext({}) has not been started yet", camelContext.getName());
                return;
            }

            LOG.trace("Starting recover check");

            // copy the current in progress before doing scan
            final Set<String> copyOfInProgress = new LinkedHashSet<String>(inProgressCompleteExchanges);

            Set<String> exchangeIds = recoverable.scan(camelContext);
            for (String exchangeId : exchangeIds) {

                // we may shutdown while doing recovery
                if (!isRunAllowed()) {
                    LOG.info("We are shutting down so stop recovering");
                    return;
                }

                // consider in progress if it was in progress before we did the scan, or currently after we did the scan
                // its safer to consider it in progress than risk duplicates due both in progress + recovered
                boolean inProgress = copyOfInProgress.contains(exchangeId) || inProgressCompleteExchanges.contains(exchangeId);
                if (inProgress) {
                    LOG.trace("Aggregated exchange with id: {} is already in progress.", exchangeId);
                } else {
                    LOG.debug("Loading aggregated exchange with id: {} to be recovered.", exchangeId);
                    Exchange exchange = recoverable.recover(camelContext, exchangeId);
                    if (exchange != null) {
                        // get the correlation key
                        String key = exchange.getProperty(Exchange.AGGREGATED_CORRELATION_KEY, String.class);
                        // and mark it as redelivered
                        exchange.getIn().setHeader(Exchange.REDELIVERED, Boolean.TRUE);

                        // get the current redelivery data
                        RedeliveryData data = redeliveryState.get(exchange.getExchangeId());

                        // if we are exhausted, then move to dead letter channel
                        if (data != null && recoverable.getMaximumRedeliveries() > 0 && data.redeliveryCounter >= recoverable.getMaximumRedeliveries()) {
                            LOG.warn("The recovered exchange is exhausted after " + recoverable.getMaximumRedeliveries()
                                    + " attempts, will now be moved to dead letter channel: " + recoverable.getDeadLetterUri());

                            // send to DLC
                            try {
                                // set redelivery counter
                                exchange.getIn().setHeader(Exchange.REDELIVERY_COUNTER, data.redeliveryCounter);
                                exchange.getIn().setHeader(Exchange.REDELIVERY_EXHAUSTED, Boolean.TRUE);
                                deadLetterProducerTemplate.send(recoverable.getDeadLetterUri(), exchange);
                            } catch (Throwable e) {
                                // store the failure on the exchange so the handling below is uniform
                                exchange.setException(e);
                            }

                            // handle if failed
                            if (exchange.getException() != null) {
                                getExceptionHandler().handleException("Failed to move recovered Exchange to dead letter channel: " + recoverable.getDeadLetterUri(), exchange.getException());
                            } else {
                                // it was ok, so confirm after it has been moved to dead letter channel, so we wont recover it again
                                recoverable.confirm(camelContext, exchangeId);
                            }
                        } else {
                            // update current redelivery state
                            if (data == null) {
                                // create new data
                                data = new RedeliveryData();
                                redeliveryState.put(exchange.getExchangeId(), data);
                            }
                            data.redeliveryCounter++;

                            // set redelivery counter
                            exchange.getIn().setHeader(Exchange.REDELIVERY_COUNTER, data.redeliveryCounter);
                            if (recoverable.getMaximumRedeliveries() > 0) {
                                exchange.getIn().setHeader(Exchange.REDELIVERY_MAX_COUNTER, recoverable.getMaximumRedeliveries());
                            }

                            LOG.debug("Delivery attempt: {} to recover aggregated exchange with id: {}", data.redeliveryCounter, exchangeId);

                            // not exhaust so resubmit the recovered exchange
                            onSubmitCompletion(key, exchange);
                        }
                    }
                }
            }

            LOG.trace("Recover check complete");
        }
    }
1021
1022    @Override
1023    protected void doStart() throws Exception {
1024        if (getCompletionTimeout() <= 0 && getCompletionInterval() <= 0 && getCompletionSize() <= 0 && getCompletionPredicate() == null
1025                && !isCompletionFromBatchConsumer() && getCompletionTimeoutExpression() == null
1026                && getCompletionSizeExpression() == null) {
1027            throw new IllegalStateException("At least one of the completions options"
1028                    + " [completionTimeout, completionInterval, completionSize, completionPredicate, completionFromBatchConsumer] must be set");
1029        }
1030
1031        if (getCloseCorrelationKeyOnCompletion() != null) {
1032            if (getCloseCorrelationKeyOnCompletion() > 0) {
1033                LOG.info("Using ClosedCorrelationKeys with a LRUCache with a capacity of " + getCloseCorrelationKeyOnCompletion());
1034                closedCorrelationKeys = new LRUCache<String, String>(getCloseCorrelationKeyOnCompletion());
1035            } else {
1036                LOG.info("Using ClosedCorrelationKeys with unbounded capacity");
1037                closedCorrelationKeys = new ConcurrentHashMap<String, String>();
1038            }
1039        }
1040
1041        if (aggregationRepository == null) {
1042            aggregationRepository = new MemoryAggregationRepository(optimisticLocking);
1043            LOG.info("Defaulting to MemoryAggregationRepository");
1044        }
1045
1046        if (optimisticLocking) {
1047            if (!(aggregationRepository instanceof OptimisticLockingAggregationRepository)) {
1048                throw new IllegalArgumentException("Optimistic locking cannot be enabled without using an AggregationRepository that implements OptimisticLockingAggregationRepository");
1049            }
1050            LOG.info("Optimistic locking is enabled");
1051        }
1052
1053        ServiceHelper.startServices(aggregationStrategy, processor, aggregationRepository);
1054
1055        // should we use recover checker
1056        if (aggregationRepository instanceof RecoverableAggregationRepository) {
1057            RecoverableAggregationRepository recoverable = (RecoverableAggregationRepository) aggregationRepository;
1058            if (recoverable.isUseRecovery()) {
1059                long interval = recoverable.getRecoveryIntervalInMillis();
1060                if (interval <= 0) {
1061                    throw new IllegalArgumentException("AggregationRepository has recovery enabled and the RecoveryInterval option must be a positive number, was: " + interval);
1062                }
1063
1064                // create a background recover thread to check every interval
1065                recoverService = camelContext.getExecutorServiceManager().newScheduledThreadPool(this, "AggregateRecoverChecker", 1);
1066                Runnable recoverTask = new RecoverTask(recoverable);
1067                LOG.info("Using RecoverableAggregationRepository by scheduling recover checker to run every " + interval + " millis.");
1068                // use fixed delay so there is X interval between each run
1069                recoverService.scheduleWithFixedDelay(recoverTask, 1000L, interval, TimeUnit.MILLISECONDS);
1070
1071                if (recoverable.getDeadLetterUri() != null) {
1072                    int max = recoverable.getMaximumRedeliveries();
1073                    if (max <= 0) {
1074                        throw new IllegalArgumentException("Option maximumRedeliveries must be a positive number, was: " + max);
1075                    }
1076                    LOG.info("After " + max + " failed redelivery attempts Exchanges will be moved to deadLetterUri: " + recoverable.getDeadLetterUri());
1077
1078                    // dead letter uri must be a valid endpoint
1079                    Endpoint endpoint = camelContext.getEndpoint(recoverable.getDeadLetterUri());
1080                    if (endpoint == null) {
1081                        throw new NoSuchEndpointException(recoverable.getDeadLetterUri());
1082                    }
1083                    deadLetterProducerTemplate = camelContext.createProducerTemplate();
1084                }
1085            }
1086        }
1087
1088        if (getCompletionInterval() > 0 && getCompletionTimeout() > 0) {
1089            throw new IllegalArgumentException("Only one of completionInterval or completionTimeout can be used, not both.");
1090        }
1091        if (getCompletionInterval() > 0) {
1092            LOG.info("Using CompletionInterval to run every " + getCompletionInterval() + " millis.");
1093            if (getTimeoutCheckerExecutorService() == null) {
1094                setTimeoutCheckerExecutorService(camelContext.getExecutorServiceManager().newScheduledThreadPool(this, AGGREGATE_TIMEOUT_CHECKER, 1));
1095                shutdownTimeoutCheckerExecutorService = true;
1096            }
1097            // trigger completion based on interval
1098            getTimeoutCheckerExecutorService().scheduleAtFixedRate(new AggregationIntervalTask(), getCompletionInterval(), getCompletionInterval(), TimeUnit.MILLISECONDS);
1099        }
1100
1101        // start timeout service if its in use
1102        if (getCompletionTimeout() > 0 || getCompletionTimeoutExpression() != null) {
1103            LOG.info("Using CompletionTimeout to trigger after " + getCompletionTimeout() + " millis of inactivity.");
1104            if (getTimeoutCheckerExecutorService() == null) {
1105                setTimeoutCheckerExecutorService(camelContext.getExecutorServiceManager().newScheduledThreadPool(this, AGGREGATE_TIMEOUT_CHECKER, 1));
1106                shutdownTimeoutCheckerExecutorService = true;
1107            }
1108            // check for timed out aggregated messages once every second
1109            timeoutMap = new AggregationTimeoutMap(getTimeoutCheckerExecutorService(), 1000L);
1110            // fill in existing timeout values from the aggregation repository, for example if a restart occurred, then we
1111            // need to re-establish the timeout map so timeout can trigger
1112            restoreTimeoutMapFromAggregationRepository();
1113            ServiceHelper.startService(timeoutMap);
1114        }
1115    }
1116
    /**
     * Stops the background checkers and dependent services, and clears the
     * transient bookkeeping. The aggregation repository and strategy are kept
     * alive until {@link #doShutdown()} so a restarted route can resume.
     */
    @Override
    protected void doStop() throws Exception {
        // note: we cannot do doForceCompletionOnStop from this doStop method
        // as this is handled in the prepareShutdown method which is also invoked when stopping a route
        // and is better suited for preparing to shutdown than this doStop method is

        // stop the background recover checker first so no new recovery runs start
        if (recoverService != null) {
            camelContext.getExecutorServiceManager().shutdown(recoverService);
        }
        ServiceHelper.stopServices(timeoutMap, processor, deadLetterProducerTemplate);

        if (closedCorrelationKeys != null) {
            // it may be a service so stop it as well
            ServiceHelper.stopService(closedCorrelationKeys);
            closedCorrelationKeys.clear();
        }
        batchConsumerCorrelationKeys.clear();
        redeliveryState.clear();
    }
1136
1137    @Override
1138    public void prepareShutdown(boolean forced) {
1139        // we are shutting down, so force completion if this option was enabled
1140        // but only do this when forced=false, as that is when we have chance to
1141        // send out new messages to be routed by Camel. When forced=true, then
1142        // we have to shutdown in a hurry
1143        if (!forced && forceCompletionOnStop) {
1144            doForceCompletionOnStop();
1145        }
1146    }
1147
1148    private void doForceCompletionOnStop() {
1149        int expected = forceCompletionOfAllGroups();
1150
1151        StopWatch watch = new StopWatch();
1152        while (inProgressCompleteExchanges.size() > 0) {
1153            LOG.trace("Waiting for {} inflight exchanges to complete", inProgressCompleteExchanges.size());
1154            try {
1155                Thread.sleep(100);
1156            } catch (InterruptedException e) {
1157                // break out as we got interrupted such as the JVM terminating
1158                LOG.warn("Interrupted while waiting for {} inflight exchanges to complete.", inProgressCompleteExchanges.size());
1159                break;
1160            }
1161        }
1162
1163        if (expected > 0) {
1164            LOG.info("Forcing completion of all groups with {} exchanges completed in {}", expected, TimeUtils.printDuration(watch.stop()));
1165        }
1166    }
1167
    /**
     * Final shutdown: stops and shuts down the aggregation repository and the
     * strategy (kept alive through doStop), clears the in-progress bookkeeping,
     * and shuts down the executor services this processor owns.
     */
    @Override
    protected void doShutdown() throws Exception {
        // shutdown aggregation repository and the strategy
        ServiceHelper.stopAndShutdownServices(aggregationRepository, aggregationStrategy);

        // cleanup when shutting down
        inProgressCompleteExchanges.clear();

        if (shutdownExecutorService) {
            camelContext.getExecutorServiceManager().shutdownNow(executorService);
        }
        // only shut down the timeout checker if we created it ourselves in doStart
        if (shutdownTimeoutCheckerExecutorService) {
            camelContext.getExecutorServiceManager().shutdownNow(timeoutCheckerExecutorService);
            timeoutCheckerExecutorService = null;
        }

        super.doShutdown();
    }
1186
    /**
     * Forces completion of every aggregation group currently held in the
     * aggregation repository, submitting each aggregated exchange downstream
     * with the completed-by property set to "forceCompletion".
     *
     * @return the number of repository keys that were candidates for completion.
     *         NOTE(review): this counts keys, not successfully completed
     *         exchanges — keys whose exchange is no longer in the repository are
     *         still included in the returned total; confirm whether callers
     *         (e.g. doForceCompletionOnStop) rely on this.
     */
    public int forceCompletionOfAllGroups() {

        // only run if CamelContext has been fully started or is stopping
        boolean allow = camelContext.getStatus().isStarted() || camelContext.getStatus().isStopping();
        if (!allow) {
            LOG.warn("Cannot start force completion of all groups because CamelContext({}) has not been started", camelContext.getName());
            return 0;
        }

        LOG.trace("Starting force completion of all groups task");

        // trigger completion for all in the repository
        Set<String> keys = aggregationRepository.getKeys();

        int total = 0;
        if (keys != null && !keys.isEmpty()) {
            // must acquire the shared aggregation lock to be able to trigger force completion
            if (!optimisticLocking) { lock.lock(); }
            total = keys.size();
            try {
                for (String key : keys) {
                    Exchange exchange = aggregationRepository.get(camelContext, key);
                    if (exchange != null) {
                        LOG.trace("Force completion triggered for correlation key: {}", key);
                        // indicate it was completed by a force completion request
                        exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "forceCompletion");
                        Exchange answer = onCompletion(key, exchange, exchange, false);
                        if (answer != null) {
                            onSubmitCompletion(key, answer);
                        }
                    }
                }
            } finally {
                if (!optimisticLocking) { lock.unlock(); }
            }
        }
        LOG.trace("Completed force completion of all groups task");

        if (total > 0) {
            LOG.debug("Forcing completion of all groups with {} exchanges", total);
        }
        return total;
    }
1230
1231}