001/**
002 * Licensed to the Apache Software Foundation (ASF) under one or more
003 * contributor license agreements.  See the NOTICE file distributed with
004 * this work for additional information regarding copyright ownership.
005 * The ASF licenses this file to You under the Apache License, Version 2.0
006 * (the "License"); you may not use this file except in compliance with
007 * the License.  You may obtain a copy of the License at
008 *
009 *      http://www.apache.org/licenses/LICENSE-2.0
010 *
011 * Unless required by applicable law or agreed to in writing, software
012 * distributed under the License is distributed on an "AS IS" BASIS,
013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
014 * See the License for the specific language governing permissions and
015 * limitations under the License.
016 */
017package org.apache.camel.processor.aggregate;
018
019import java.util.ArrayList;
020import java.util.Collections;
021import java.util.LinkedHashSet;
022import java.util.List;
023import java.util.Map;
024import java.util.Set;
025import java.util.concurrent.ConcurrentHashMap;
026import java.util.concurrent.ConcurrentSkipListSet;
027import java.util.concurrent.ExecutorService;
028import java.util.concurrent.ScheduledExecutorService;
029import java.util.concurrent.TimeUnit;
030import java.util.concurrent.atomic.AtomicInteger;
031import java.util.concurrent.atomic.AtomicLong;
032import java.util.concurrent.locks.Lock;
033import java.util.concurrent.locks.ReentrantLock;
034
035import org.apache.camel.AsyncCallback;
036import org.apache.camel.AsyncProcessor;
037import org.apache.camel.CamelContext;
038import org.apache.camel.CamelExchangeException;
039import org.apache.camel.Endpoint;
040import org.apache.camel.Exchange;
041import org.apache.camel.Expression;
042import org.apache.camel.Navigate;
043import org.apache.camel.NoSuchEndpointException;
044import org.apache.camel.Predicate;
045import org.apache.camel.Processor;
046import org.apache.camel.ProducerTemplate;
047import org.apache.camel.ShutdownRunningTask;
048import org.apache.camel.TimeoutMap;
049import org.apache.camel.Traceable;
050import org.apache.camel.spi.AggregationRepository;
051import org.apache.camel.spi.ExceptionHandler;
052import org.apache.camel.spi.IdAware;
053import org.apache.camel.spi.OptimisticLockingAggregationRepository;
054import org.apache.camel.spi.RecoverableAggregationRepository;
055import org.apache.camel.spi.ShutdownAware;
056import org.apache.camel.spi.ShutdownPrepared;
057import org.apache.camel.spi.Synchronization;
058import org.apache.camel.support.DefaultTimeoutMap;
059import org.apache.camel.support.LoggingExceptionHandler;
060import org.apache.camel.support.ServiceSupport;
061import org.apache.camel.util.AsyncProcessorHelper;
062import org.apache.camel.util.ExchangeHelper;
063import org.apache.camel.util.LRUCache;
064import org.apache.camel.util.ObjectHelper;
065import org.apache.camel.util.ServiceHelper;
066import org.apache.camel.util.StopWatch;
067import org.apache.camel.util.TimeUtils;
068import org.slf4j.Logger;
069import org.slf4j.LoggerFactory;
070
071/**
072 * An implementation of the <a
073 * href="http://camel.apache.org/aggregator2.html">Aggregator</a>
074 * pattern where a batch of messages are processed (up to a maximum amount or
075 * until some timeout is reached) and messages for the same correlation key are
076 * combined together using some kind of {@link AggregationStrategy}
077 * (by default the latest message is used) to compress many message exchanges
078 * into a smaller number of exchanges.
079 * <p/>
080 * A good example of this is stock market data; you may be receiving 30,000
081 * messages/second and you may want to throttle it right down so that multiple
082 * messages for the same stock are combined (or just the latest message is used
083 * and older prices are discarded). Another idea is to combine line item messages
084 * together into a single invoice message.
085 */
public class AggregateProcessor extends ServiceSupport implements AsyncProcessor, Navigate<Processor>, Traceable, ShutdownPrepared, ShutdownAware, IdAware {

    public static final String AGGREGATE_TIMEOUT_CHECKER = "AggregateTimeoutChecker";

    private static final Logger LOG = LoggerFactory.getLogger(AggregateProcessor.class);

    // serializes doAggregation() for the non optimistic-locking code path so the same
    // correlation key is never aggregated concurrently
    private final Lock lock = new ReentrantLock();
    private final CamelContext camelContext;
    // the downstream processor which receives the completed (aggregated) exchanges
    private final Processor processor;
    private String id;
    private AggregationStrategy aggregationStrategy;
    // when enabled the strategy is consulted for pre-completion before aggregating (see doAggregation)
    private boolean preCompletion;
    private Expression correlationExpression;
    private AggregateController aggregateController;
    // executor used to send out completed exchanges asynchronously
    private final ExecutorService executorService;
    // whether this processor owns (and must shut down) the executor service
    private final boolean shutdownExecutorService;
    private OptimisticLockRetryPolicy optimisticLockRetryPolicy = new OptimisticLockRetryPolicy();
    private ScheduledExecutorService timeoutCheckerExecutorService;
    private boolean shutdownTimeoutCheckerExecutorService;
    private ScheduledExecutorService recoverService;
    // store correlation key -> exchange id in timeout map
    private TimeoutMap<String, String> timeoutMap;
    private ExceptionHandler exceptionHandler;
    private AggregationRepository aggregationRepository;
    // correlation keys which have been completed and are no longer allowed to aggregate
    // (null when closeCorrelationKeyOnCompletion is not configured)
    private Map<String, String> closedCorrelationKeys;
    private final Set<String> batchConsumerCorrelationKeys = new ConcurrentSkipListSet<String>();
    // exchange ids of completed exchanges currently being sent downstream
    private final Set<String> inProgressCompleteExchanges = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
    private final Map<String, RedeliveryData> redeliveryState = new ConcurrentHashMap<String, RedeliveryData>();

    // statistics counters, only updated when statistics are enabled
    private final AggregateProcessorStatistics statistics = new Statistics();
    private final AtomicLong totalIn = new AtomicLong();
    private final AtomicLong totalCompleted = new AtomicLong();
    private final AtomicLong completedBySize = new AtomicLong();
    private final AtomicLong completedByStrategy = new AtomicLong();
    private final AtomicLong completedByInterval = new AtomicLong();
    private final AtomicLong completedByTimeout = new AtomicLong();
    private final AtomicLong completedByPredicate = new AtomicLong();
    private final AtomicLong completedByBatchConsumer = new AtomicLong();
    private final AtomicLong completedByForce = new AtomicLong();

    // bookkeeping of redelivery attempts per exchange id
    private class RedeliveryData {
        int redeliveryCounter;
    }
130
131    private class Statistics implements AggregateProcessorStatistics {
132
133        private boolean statisticsEnabled = true;
134
135        public long getTotalIn() {
136            return totalIn.get();
137        }
138
139        public long getTotalCompleted() {
140            return totalCompleted.get();
141        }
142
143        public long getCompletedBySize() {
144            return completedBySize.get();
145        }
146
147        public long getCompletedByStrategy() {
148            return completedByStrategy.get();
149        }
150
151        public long getCompletedByInterval() {
152            return completedByInterval.get();
153        }
154
155        public long getCompletedByTimeout() {
156            return completedByTimeout.get();
157        }
158
159        public long getCompletedByPredicate() {
160            return completedByPredicate.get();
161        }
162
163        public long getCompletedByBatchConsumer() {
164            return completedByBatchConsumer.get();
165        }
166
167        public long getCompletedByForce() {
168            return completedByForce.get();
169        }
170
171        public void reset() {
172            totalIn.set(0);
173            totalCompleted.set(0);
174            completedBySize.set(0);
175            completedByStrategy.set(0);
176            completedByTimeout.set(0);
177            completedByPredicate.set(0);
178            completedByBatchConsumer.set(0);
179            completedByForce.set(0);
180        }
181
182        public boolean isStatisticsEnabled() {
183            return statisticsEnabled;
184        }
185
186        public void setStatisticsEnabled(boolean statisticsEnabled) {
187            this.statisticsEnabled = statisticsEnabled;
188        }
189    }
190
    // options
    private boolean ignoreInvalidCorrelationKeys;
    // LRU cache size for closed correlation keys; null disables the close-on-completion feature
    private Integer closeCorrelationKeyOnCompletion;
    private boolean parallelProcessing;
    // when enabled the aggregation repository must be an OptimisticLockingAggregationRepository
    private boolean optimisticLocking;

    // different ways to have completion triggered
    // whether to check completion before (eager) or after aggregation
    private boolean eagerCheckCompletion;
    private Predicate completionPredicate;
    private long completionTimeout;
    // evaluated per exchange; takes precedence over the fixed completionTimeout
    private Expression completionTimeoutExpression;
    private long completionInterval;
    private int completionSize;
    // evaluated per exchange; takes precedence over the fixed completionSize
    private Expression completionSizeExpression;
    private boolean completionFromBatchConsumer;
    // counts exchanges received from a batch consumer; reset when the batch completes
    private AtomicInteger batchConsumerCounter = new AtomicInteger();
    private boolean discardOnCompletionTimeout;
    private boolean forceCompletionOnStop;
    private boolean completeAllOnStop;

    // lazily created template used for sending recovered exchanges to a dead letter channel
    private ProducerTemplate deadLetterProducerTemplate;
212
    /**
     * Creates the aggregate processor.
     * <p/>
     * All arguments are mandatory; each is null-checked in order, so the first
     * missing argument determines which exception is thrown.
     *
     * @param camelContext            the camel context
     * @param processor               the downstream processor to send completed exchanges to
     * @param correlationExpression   expression used to compute the correlation key
     * @param aggregationStrategy     strategy used to combine exchanges
     * @param executorService         executor used to send out completed exchanges
     * @param shutdownExecutorService whether this processor owns and must shut down the executor
     */
    public AggregateProcessor(CamelContext camelContext, Processor processor,
                              Expression correlationExpression, AggregationStrategy aggregationStrategy,
                              ExecutorService executorService, boolean shutdownExecutorService) {
        ObjectHelper.notNull(camelContext, "camelContext");
        ObjectHelper.notNull(processor, "processor");
        ObjectHelper.notNull(correlationExpression, "correlationExpression");
        ObjectHelper.notNull(aggregationStrategy, "aggregationStrategy");
        ObjectHelper.notNull(executorService, "executorService");
        this.camelContext = camelContext;
        this.processor = processor;
        this.correlationExpression = correlationExpression;
        this.aggregationStrategy = aggregationStrategy;
        this.executorService = executorService;
        this.shutdownExecutorService = shutdownExecutorService;
        this.exceptionHandler = new LoggingExceptionHandler(camelContext, getClass());
    }
229
230    @Override
231    public String toString() {
232        return "AggregateProcessor[to: " + processor + "]";
233    }
234
235    public String getTraceLabel() {
236        return "aggregate[" + correlationExpression + "]";
237    }
238
239    public List<Processor> next() {
240        if (!hasNext()) {
241            return null;
242        }
243        List<Processor> answer = new ArrayList<Processor>(1);
244        answer.add(processor);
245        return answer;
246    }
247
248    public boolean hasNext() {
249        return processor != null;
250    }
251
    // IdAware: the processor id assigned by the route builder
    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }
259
    public void process(Exchange exchange) throws Exception {
        // bridge the synchronous Processor API onto the async variant below
        AsyncProcessorHelper.process(this, exchange);
    }
263
264    public boolean process(Exchange exchange, AsyncCallback callback) {
265        try {
266            doProcess(exchange);
267        } catch (Throwable e) {
268            exchange.setException(e);
269        }
270        callback.done(true);
271        return true;
272    }
273
    /**
     * Processes the incoming exchange: computes the correlation key, aggregates the
     * exchange (under the lock, or with optimistic-locking retries) and submits any
     * completed aggregations for downstream processing.
     *
     * @param exchange the incoming exchange
     * @throws Exception if the correlation key is invalid/closed, or aggregation fails
     */
    protected void doProcess(Exchange exchange) throws Exception {

        if (getStatistics().isStatisticsEnabled()) {
            totalIn.incrementAndGet();
        }

        //check for the special header to force completion of all groups (and ignore the exchange otherwise)
        boolean completeAllGroups = exchange.getIn().getHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS, false, boolean.class);
        if (completeAllGroups) {
            forceCompletionOfAllGroups();
            return;
        }

        // compute correlation expression
        String key = correlationExpression.evaluate(exchange, String.class);
        if (ObjectHelper.isEmpty(key)) {
            // we have a bad correlation key
            if (isIgnoreInvalidCorrelationKeys()) {
                LOG.debug("Invalid correlation key. This Exchange will be ignored: {}", exchange);
                return;
            } else {
                throw new CamelExchangeException("Invalid correlation key", exchange);
            }
        }

        // is the correlation key closed?
        if (closedCorrelationKeys != null && closedCorrelationKeys.containsKey(key)) {
            throw new ClosedCorrelationKeyException(key, exchange);
        }

        // when optimist locking is enabled we keep trying until we succeed
        if (optimisticLocking) {
            List<Exchange> aggregated = null;
            boolean exhaustedRetries = true;
            int attempt = 0;
            do {
                attempt++;
                // copy exchange, and do not share the unit of work
                // the aggregated output runs in another unit of work
                Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);
                try {
                    aggregated = doAggregation(key, copy);
                    exhaustedRetries = false;
                    break;
                } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
                    LOG.trace("On attempt {} OptimisticLockingAggregationRepository: {} threw OptimisticLockingException while trying to add() key: {} and exchange: {}",
                              new Object[]{attempt, aggregationRepository, key, copy, e});
                    // back off according to the retry policy before the next attempt
                    optimisticLockRetryPolicy.doDelay(attempt);
                }
            } while (optimisticLockRetryPolicy.shouldRetry(attempt));

            if (exhaustedRetries) {
                throw new CamelExchangeException("Exhausted optimistic locking retry attempts, tried " + attempt + " times", exchange,
                        new OptimisticLockingAggregationRepository.OptimisticLockingException());
            } else if (aggregated != null) {
                // we are completed so submit to completion
                for (Exchange agg : aggregated) {
                    onSubmitCompletion(key, agg);
                }
            }
        } else {
            // copy exchange, and do not share the unit of work
            // the aggregated output runs in another unit of work
            Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);

            // when memory based then its fast using synchronized, but if the aggregation repository is IO
            // bound such as JPA etc then concurrent aggregation per correlation key could
            // improve performance as we can run aggregation repository get/add in parallel
            List<Exchange> aggregated = null;
            lock.lock();
            try {
                aggregated = doAggregation(key, copy);
            } finally {
                lock.unlock();
            }

            // we are completed so do that work outside the lock
            if (aggregated != null) {
                for (Exchange agg : aggregated) {
                    onSubmitCompletion(key, agg);
                }
            }
        }

        // check for the special header to force completion of all groups (inclusive of the message)
        boolean completeAllGroupsInclusive = exchange.getIn().getHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS_INCLUSIVE, false, boolean.class);
        if (completeAllGroupsInclusive) {
            forceCompletionOfAllGroups();
        }
    }
364
365    /**
366     * Aggregates the exchange with the given correlation key
367     * <p/>
368     * This method <b>must</b> be run synchronized as we cannot aggregate the same correlation key
369     * in parallel.
370     * <p/>
371     * The returned {@link Exchange} should be send downstream using the {@link #onSubmitCompletion(String, org.apache.camel.Exchange)}
372     * method which sends out the aggregated and completed {@link Exchange}.
373     *
374     * @param key      the correlation key
375     * @param newExchange the exchange
376     * @return the aggregated exchange(s) which is complete, or <tt>null</tt> if not yet complete
377     * @throws org.apache.camel.CamelExchangeException is thrown if error aggregating
378     */
379    private List<Exchange> doAggregation(String key, Exchange newExchange) throws CamelExchangeException {
380        LOG.trace("onAggregation +++ start +++ with correlation key: {}", key);
381
382        List<Exchange> list = new ArrayList<Exchange>();
383        String complete = null;
384
385        Exchange answer;
386        Exchange originalExchange = aggregationRepository.get(newExchange.getContext(), key);
387        Exchange oldExchange = originalExchange;
388
389        Integer size = 1;
390        if (oldExchange != null) {
391            // hack to support legacy AggregationStrategy's that modify and return the oldExchange, these will not
392            // working when using an identify based approach for optimistic locking like the MemoryAggregationRepository.
393            if (optimisticLocking && aggregationRepository instanceof MemoryAggregationRepository) {
394                oldExchange = originalExchange.copy();
395            }
396            size = oldExchange.getProperty(Exchange.AGGREGATED_SIZE, 0, Integer.class);
397            size++;
398        }
399
400        // prepare the exchanges for aggregation
401        ExchangeHelper.prepareAggregation(oldExchange, newExchange);
402
403        // check if we are pre complete
404        if (preCompletion) {
405            try {
406                // put the current aggregated size on the exchange so its avail during completion check
407                newExchange.setProperty(Exchange.AGGREGATED_SIZE, size);
408                complete = isPreCompleted(key, oldExchange, newExchange);
409                // make sure to track timeouts if not complete
410                if (complete == null) {
411                    trackTimeout(key, newExchange);
412                }
413                // remove it afterwards
414                newExchange.removeProperty(Exchange.AGGREGATED_SIZE);
415            } catch (Throwable e) {
416                // must catch any exception from aggregation
417                throw new CamelExchangeException("Error occurred during preComplete", newExchange, e);
418            }
419        } else if (isEagerCheckCompletion()) {
420            // put the current aggregated size on the exchange so its avail during completion check
421            newExchange.setProperty(Exchange.AGGREGATED_SIZE, size);
422            complete = isCompleted(key, newExchange);
423            // make sure to track timeouts if not complete
424            if (complete == null) {
425                trackTimeout(key, newExchange);
426            }
427            // remove it afterwards
428            newExchange.removeProperty(Exchange.AGGREGATED_SIZE);
429        }
430
431        if (preCompletion && complete != null) {
432            // need to pre complete the current group before we aggregate
433            doAggregationComplete(complete, list, key, originalExchange, oldExchange);
434            // as we complete the current group eager, we should indicate the new group is not complete
435            complete = null;
436            // and clear old/original exchange as we start on a new group
437            oldExchange = null;
438            originalExchange = null;
439            // and reset the size to 1
440            size = 1;
441            // make sure to track timeout as we just restart the correlation group when we are in pre completion mode
442            trackTimeout(key, newExchange);
443        }
444
445        // aggregate the exchanges
446        try {
447            answer = onAggregation(oldExchange, newExchange);
448        } catch (Throwable e) {
449            // must catch any exception from aggregation
450            throw new CamelExchangeException("Error occurred during aggregation", newExchange, e);
451        }
452        if (answer == null) {
453            throw new CamelExchangeException("AggregationStrategy " + aggregationStrategy + " returned null which is not allowed", newExchange);
454        }
455
456        // update the aggregated size
457        answer.setProperty(Exchange.AGGREGATED_SIZE, size);
458
459        // maybe we should check completion after the aggregation
460        if (!preCompletion && !isEagerCheckCompletion()) {
461            complete = isCompleted(key, answer);
462            // make sure to track timeouts if not complete
463            if (complete == null) {
464                trackTimeout(key, newExchange);
465            }
466        }
467
468        if (complete == null) {
469            // only need to update aggregation repository if we are not complete
470            doAggregationRepositoryAdd(newExchange.getContext(), key, originalExchange, answer);
471        } else {
472            // if we are complete then add the answer to the list
473            doAggregationComplete(complete, list, key, originalExchange, answer);
474        }
475
476        LOG.trace("onAggregation +++  end  +++ with correlation key: {}", key);
477        return list;
478    }
479
    /**
     * Completes the aggregated exchange(s) and collects them into the given list.
     * <p/>
     * When completion was triggered by a batch consumer ("consumer"), every pending
     * batch correlation key is completed; otherwise only the current answer is.
     *
     * @param complete         the completion trigger name (eg "consumer", "size", ...)
     * @param list             collector for the completed exchanges to send downstream
     * @param key              the correlation key of the current aggregation
     * @param originalExchange the exchange as stored in the repository (may be null)
     * @param answer           the aggregated exchange for the current key
     */
    protected void doAggregationComplete(String complete, List<Exchange> list, String key, Exchange originalExchange, Exchange answer) {
        if ("consumer".equals(complete)) {
            for (String batchKey : batchConsumerCorrelationKeys) {
                Exchange batchAnswer;
                if (batchKey.equals(key)) {
                    // skip the current aggregated key as we have already aggregated it and have the answer
                    batchAnswer = answer;
                } else {
                    batchAnswer = aggregationRepository.get(camelContext, batchKey);
                }

                if (batchAnswer != null) {
                    batchAnswer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, complete);
                    onCompletion(batchKey, originalExchange, batchAnswer, false);
                    list.add(batchAnswer);
                }
            }
            batchConsumerCorrelationKeys.clear();
            // we have already submitted to completion, so answer should be null
            answer = null;
        } else if (answer != null) {
            // we are complete for this exchange
            answer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, complete);
            // onCompletion may return null (eg discarded on timeout)
            answer = onCompletion(key, originalExchange, answer, false);
        }

        if (answer != null) {
            list.add(answer);
        }
    }
510
511    protected void doAggregationRepositoryAdd(CamelContext camelContext, String key, Exchange oldExchange, Exchange newExchange) {
512        LOG.trace("In progress aggregated oldExchange: {}, newExchange: {} with correlation key: {}", new Object[]{oldExchange, newExchange, key});
513        if (optimisticLocking) {
514            try {
515                ((OptimisticLockingAggregationRepository)aggregationRepository).add(camelContext, key, oldExchange, newExchange);
516            } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
517                onOptimisticLockingFailure(oldExchange, newExchange);
518                throw e;
519            }
520        } else {
521            aggregationRepository.add(camelContext, key, newExchange);
522        }
523    }
524
525    protected void onOptimisticLockingFailure(Exchange oldExchange, Exchange newExchange) {
526        if (aggregationStrategy instanceof OptimisticLockingAwareAggregationStrategy) {
527            LOG.trace("onOptimisticLockFailure with AggregationStrategy: {}, oldExchange: {}, newExchange: {}",
528                      new Object[]{aggregationStrategy, oldExchange, newExchange});
529            ((OptimisticLockingAwareAggregationStrategy)aggregationStrategy).onOptimisticLockFailure(oldExchange, newExchange);
530        }
531    }
532
533    /**
534     * Tests whether the given exchanges is pre-complete or not
535     *
536     * @param key      the correlation key
537     * @param oldExchange   the existing exchange
538     * @param newExchange the incoming exchange
539     * @return <tt>null</tt> if not pre-completed, otherwise a String with the type that triggered the pre-completion
540     */
541    protected String isPreCompleted(String key, Exchange oldExchange, Exchange newExchange) {
542        boolean complete = false;
543        if (aggregationStrategy instanceof PreCompletionAwareAggregationStrategy) {
544            complete = ((PreCompletionAwareAggregationStrategy) aggregationStrategy).preComplete(oldExchange, newExchange);
545        }
546        return complete ? "strategy" : null;
547    }
548
549    /**
550     * Tests whether the given exchange is complete or not
551     *
552     * @param key      the correlation key
553     * @param exchange the incoming exchange
554     * @return <tt>null</tt> if not completed, otherwise a String with the type that triggered the completion
555     */
556    protected String isCompleted(String key, Exchange exchange) {
557        // batch consumer completion must always run first
558        if (isCompletionFromBatchConsumer()) {
559            batchConsumerCorrelationKeys.add(key);
560            batchConsumerCounter.incrementAndGet();
561            int size = exchange.getProperty(Exchange.BATCH_SIZE, 0, Integer.class);
562            if (size > 0 && batchConsumerCounter.intValue() >= size) {
563                // batch consumer is complete then reset the counter
564                batchConsumerCounter.set(0);
565                return "consumer";
566            }
567        }
568
569        if (exchange.getProperty(Exchange.AGGREGATION_COMPLETE_CURRENT_GROUP, false, boolean.class)) {
570            return "strategy";
571        }
572
573        if (getCompletionPredicate() != null) {
574            boolean answer = getCompletionPredicate().matches(exchange);
575            if (answer) {
576                return "predicate";
577            }
578        }
579
580        boolean sizeChecked = false;
581        if (getCompletionSizeExpression() != null) {
582            Integer value = getCompletionSizeExpression().evaluate(exchange, Integer.class);
583            if (value != null && value > 0) {
584                // mark as already checked size as expression takes precedence over static configured
585                sizeChecked = true;
586                int size = exchange.getProperty(Exchange.AGGREGATED_SIZE, 1, Integer.class);
587                if (size >= value) {
588                    return "size";
589                }
590            }
591        }
592        if (!sizeChecked && getCompletionSize() > 0) {
593            int size = exchange.getProperty(Exchange.AGGREGATED_SIZE, 1, Integer.class);
594            if (size >= getCompletionSize()) {
595                return "size";
596            }
597        }
598
599        // not complete
600        return null;
601    }
602
603    protected void trackTimeout(String key, Exchange exchange) {
604        // timeout can be either evaluated based on an expression or from a fixed value
605        // expression takes precedence
606        boolean timeoutSet = false;
607        if (getCompletionTimeoutExpression() != null) {
608            Long value = getCompletionTimeoutExpression().evaluate(exchange, Long.class);
609            if (value != null && value > 0) {
610                if (LOG.isTraceEnabled()) {
611                    LOG.trace("Updating correlation key {} to timeout after {} ms. as exchange received: {}",
612                            new Object[]{key, value, exchange});
613                }
614                addExchangeToTimeoutMap(key, exchange, value);
615                timeoutSet = true;
616            }
617        }
618        if (!timeoutSet && getCompletionTimeout() > 0) {
619            // timeout is used so use the timeout map to keep an eye on this
620            if (LOG.isTraceEnabled()) {
621                LOG.trace("Updating correlation key {} to timeout after {} ms. as exchange received: {}",
622                        new Object[]{key, getCompletionTimeout(), exchange});
623            }
624            addExchangeToTimeoutMap(key, exchange, getCompletionTimeout());
625        }
626    }
627
628    protected Exchange onAggregation(Exchange oldExchange, Exchange newExchange) {
629        return aggregationStrategy.aggregate(oldExchange, newExchange);
630    }
631
632    protected boolean onPreCompletionAggregation(Exchange oldExchange, Exchange newExchange) {
633        if (aggregationStrategy instanceof PreCompletionAwareAggregationStrategy) {
634            return ((PreCompletionAwareAggregationStrategy) aggregationStrategy).preComplete(oldExchange, newExchange);
635        }
636        return false;
637    }
638
    /**
     * Performs the completion housekeeping for an aggregated group: removes it from
     * the repository and timeout map, optionally closes the correlation key, invokes
     * timeout-aware strategies, and decides whether the aggregated exchange should be
     * sent downstream or discarded.
     *
     * @param key        the correlation key
     * @param original   the exchange as stored in the repository, or null if this group
     *                   completed with a single exchange that was never stored
     * @param aggregated the aggregated exchange
     * @param fromTimeout whether completion was triggered by the timeout checker
     * @return the exchange to send downstream, or <tt>null</tt> when discarded
     */
    protected Exchange onCompletion(final String key, final Exchange original, final Exchange aggregated, boolean fromTimeout) {
        // store the correlation key as property before we remove so the repository has that information
        if (original != null) {
            original.setProperty(Exchange.AGGREGATED_CORRELATION_KEY, key);
        }
        aggregated.setProperty(Exchange.AGGREGATED_CORRELATION_KEY, key);

        // only remove if we have previous added (as we could potentially complete with only 1 exchange)
        // (if we have previous added then we have that as the original exchange)
        if (original != null) {
            // remove from repository as its completed, we do this first as to trigger any OptimisticLockingException's
            aggregationRepository.remove(aggregated.getContext(), key, original);
        }

        if (!fromTimeout && timeoutMap != null) {
            // cleanup timeout map if it was a incoming exchange which triggered the timeout (and not the timeout checker)
            LOG.trace("Removing correlation key {} from timeout", key);
            timeoutMap.remove(key);
        }

        // this key has been closed so add it to the closed map
        if (closedCorrelationKeys != null) {
            closedCorrelationKeys.put(key, key);
        }

        if (fromTimeout) {
            // invoke timeout if its timeout aware aggregation strategy,
            // to allow any custom processing before discarding the exchange
            if (aggregationStrategy instanceof TimeoutAwareAggregationStrategy) {
                long timeout = getCompletionTimeout() > 0 ? getCompletionTimeout() : -1;
                ((TimeoutAwareAggregationStrategy) aggregationStrategy).timeout(aggregated, -1, -1, timeout);
            }
        }

        Exchange answer;
        if (fromTimeout && isDiscardOnCompletionTimeout()) {
            // discard due timeout
            LOG.debug("Aggregation for correlation key {} discarding aggregated exchange: {}", key, aggregated);
            // must confirm the discarded exchange
            aggregationRepository.confirm(aggregated.getContext(), aggregated.getExchangeId());
            // and remove redelivery state as well
            redeliveryState.remove(aggregated.getExchangeId());
            // the completion was from timeout and we should just discard it
            answer = null;
        } else {
            // the aggregated exchange should be published (sent out)
            answer = aggregated;
        }

        return answer;
    }
690
691    private void onSubmitCompletion(final String key, final Exchange exchange) {
692        LOG.debug("Aggregation complete for correlation key {} sending aggregated exchange: {}", key, exchange);
693
694        // add this as in progress before we submit the task
695        inProgressCompleteExchanges.add(exchange.getExchangeId());
696
697        // invoke the on completion callback
698        if (aggregationStrategy instanceof CompletionAwareAggregationStrategy) {
699            ((CompletionAwareAggregationStrategy) aggregationStrategy).onCompletion(exchange);
700        }
701
702        if (getStatistics().isStatisticsEnabled()) {
703            totalCompleted.incrementAndGet();
704
705            String completedBy = exchange.getProperty(Exchange.AGGREGATED_COMPLETED_BY, String.class);
706            if ("interval".equals(completedBy)) {
707                completedByInterval.incrementAndGet();
708            } else if ("timeout".equals(completedBy)) {
709                completedByTimeout.incrementAndGet();
710            } else if ("force".equals(completedBy)) {
711                completedByForce.incrementAndGet();
712            } else if ("consumer".equals(completedBy)) {
713                completedByBatchConsumer.incrementAndGet();
714            } else if ("predicate".equals(completedBy)) {
715                completedByPredicate.incrementAndGet();
716            } else if ("size".equals(completedBy)) {
717                completedBySize.incrementAndGet();
718            } else if ("strategy".equals(completedBy)) {
719                completedByStrategy.incrementAndGet();
720            }
721        }
722
723        // send this exchange
724        executorService.submit(new Runnable() {
725            public void run() {
726                LOG.debug("Processing aggregated exchange: {}", exchange);
727
728                // add on completion task so we remember to update the inProgressCompleteExchanges
729                exchange.addOnCompletion(new AggregateOnCompletion(exchange.getExchangeId()));
730
731                try {
732                    processor.process(exchange);
733                } catch (Throwable e) {
734                    exchange.setException(e);
735                }
736
737                // log exception if there was a problem
738                if (exchange.getException() != null) {
739                    // if there was an exception then let the exception handler handle it
740                    getExceptionHandler().handleException("Error processing aggregated exchange", exchange, exchange.getException());
741                } else {
742                    LOG.trace("Processing aggregated exchange: {} complete.", exchange);
743                }
744            }
745        });
746    }
747
748    /**
749     * Restores the timeout map with timeout values from the aggregation repository.
750     * <p/>
751     * This is needed in case the aggregator has been stopped and started again (for example a server restart).
752     * Then the existing exchanges from the {@link AggregationRepository} must have their timeout conditions restored.
753     */
754    protected void restoreTimeoutMapFromAggregationRepository() throws Exception {
755        // grab the timeout value for each partly aggregated exchange
756        Set<String> keys = aggregationRepository.getKeys();
757        if (keys == null || keys.isEmpty()) {
758            return;
759        }
760
761        StopWatch watch = new StopWatch();
762        LOG.trace("Starting restoring CompletionTimeout for {} existing exchanges from the aggregation repository...", keys.size());
763
764        for (String key : keys) {
765            Exchange exchange = aggregationRepository.get(camelContext, key);
766            // grab the timeout value
767            long timeout = exchange.hasProperties() ? exchange.getProperty(Exchange.AGGREGATED_TIMEOUT, 0, long.class) : 0;
768            if (timeout > 0) {
769                LOG.trace("Restoring CompletionTimeout for exchangeId: {} with timeout: {} millis.", exchange.getExchangeId(), timeout);
770                addExchangeToTimeoutMap(key, exchange, timeout);
771            }
772        }
773
774        // log duration of this task so end user can see how long it takes to pre-check this upon starting
775        LOG.info("Restored {} CompletionTimeout conditions in the AggregationTimeoutChecker in {}",
776                timeoutMap.size(), TimeUtils.printDuration(watch.stop()));
777    }
778
779    /**
780     * Adds the given exchange to the timeout map, which is used by the timeout checker task to trigger timeouts.
781     *
782     * @param key      the correlation key
783     * @param exchange the exchange
784     * @param timeout  the timeout value in millis
785     */
786    private void addExchangeToTimeoutMap(String key, Exchange exchange, long timeout) {
787        // store the timeout value on the exchange as well, in case we need it later
788        exchange.setProperty(Exchange.AGGREGATED_TIMEOUT, timeout);
789        timeoutMap.put(key, exchange.getExchangeId(), timeout);
790    }
791
792    /**
793     * Current number of closed correlation keys in the memory cache
794     */
795    public int getClosedCorrelationKeysCacheSize() {
796        if (closedCorrelationKeys != null) {
797            return closedCorrelationKeys.size();
798        } else {
799            return 0;
800        }
801    }
802
803    /**
804     * Clear all the closed correlation keys stored in the cache
805     */
806    public void clearClosedCorrelationKeysCache() {
807        if (closedCorrelationKeys != null) {
808            closedCorrelationKeys.clear();
809        }
810    }
811
    /**
     * Gets the statistics holder for this aggregator.
     */
    public AggregateProcessorStatistics getStatistics() {
        return statistics;
    }
815
    /**
     * Number of completed exchanges which are currently in progress of being sent out.
     */
    public int getInProgressCompleteExchanges() {
        return inProgressCompleteExchanges.size();
    }
819
    /**
     * Gets the predicate used to determine when the aggregation is complete.
     */
    public Predicate getCompletionPredicate() {
        return completionPredicate;
    }
823
    /**
     * Sets the predicate used to determine when the aggregation is complete.
     */
    public void setCompletionPredicate(Predicate completionPredicate) {
        this.completionPredicate = completionPredicate;
    }
827
    /**
     * Whether completion is checked eagerly (on the incoming exchange) instead of on the aggregated exchange.
     */
    public boolean isEagerCheckCompletion() {
        return eagerCheckCompletion;
    }
831
    /**
     * Sets whether completion is checked eagerly (on the incoming exchange) instead of on the aggregated exchange.
     */
    public void setEagerCheckCompletion(boolean eagerCheckCompletion) {
        this.eagerCheckCompletion = eagerCheckCompletion;
    }
835
    /**
     * Gets the completion timeout in millis (0 or negative = disabled).
     */
    public long getCompletionTimeout() {
        return completionTimeout;
    }
839
    /**
     * Sets the completion timeout in millis (0 or negative = disabled).
     */
    public void setCompletionTimeout(long completionTimeout) {
        this.completionTimeout = completionTimeout;
    }
843
    /**
     * Gets the expression used to compute a per-exchange completion timeout.
     */
    public Expression getCompletionTimeoutExpression() {
        return completionTimeoutExpression;
    }
847
    /**
     * Sets the expression used to compute a per-exchange completion timeout.
     */
    public void setCompletionTimeoutExpression(Expression completionTimeoutExpression) {
        this.completionTimeoutExpression = completionTimeoutExpression;
    }
851
    /**
     * Gets the completion interval in millis (0 or negative = disabled).
     */
    public long getCompletionInterval() {
        return completionInterval;
    }
855
    /**
     * Sets the completion interval in millis (0 or negative = disabled).
     */
    public void setCompletionInterval(long completionInterval) {
        this.completionInterval = completionInterval;
    }
859
    /**
     * Gets the completion size (0 or negative = disabled).
     */
    public int getCompletionSize() {
        return completionSize;
    }
863
    /**
     * Sets the completion size (0 or negative = disabled).
     */
    public void setCompletionSize(int completionSize) {
        this.completionSize = completionSize;
    }
867
    /**
     * Gets the expression used to compute a per-exchange completion size.
     */
    public Expression getCompletionSizeExpression() {
        return completionSizeExpression;
    }
871
    /**
     * Sets the expression used to compute a per-exchange completion size.
     */
    public void setCompletionSizeExpression(Expression completionSizeExpression) {
        this.completionSizeExpression = completionSizeExpression;
    }
875
    /**
     * Whether exchanges with an invalid correlation key are silently ignored instead of failing.
     */
    public boolean isIgnoreInvalidCorrelationKeys() {
        return ignoreInvalidCorrelationKeys;
    }
879
    /**
     * Sets whether exchanges with an invalid correlation key are silently ignored instead of failing.
     */
    public void setIgnoreInvalidCorrelationKeys(boolean ignoreInvalidCorrelationKeys) {
        this.ignoreInvalidCorrelationKeys = ignoreInvalidCorrelationKeys;
    }
883
    /**
     * Gets the closed correlation key cache capacity: a positive value uses a bounded LRU cache,
     * 0 or negative uses an unbounded cache, and <tt>null</tt> disables closing keys on completion.
     */
    public Integer getCloseCorrelationKeyOnCompletion() {
        return closeCorrelationKeyOnCompletion;
    }
887
    /**
     * Sets the closed correlation key cache capacity: a positive value uses a bounded LRU cache,
     * 0 or negative uses an unbounded cache, and <tt>null</tt> disables closing keys on completion.
     */
    public void setCloseCorrelationKeyOnCompletion(Integer closeCorrelationKeyOnCompletion) {
        this.closeCorrelationKeyOnCompletion = closeCorrelationKeyOnCompletion;
    }
891
    /**
     * Whether completion is triggered by the batch consumer.
     */
    public boolean isCompletionFromBatchConsumer() {
        return completionFromBatchConsumer;
    }
895
    /**
     * Sets whether completion is triggered by the batch consumer.
     */
    public void setCompletionFromBatchConsumer(boolean completionFromBatchConsumer) {
        this.completionFromBatchConsumer = completionFromBatchConsumer;
    }
899
    /**
     * Whether all pending exchanges in the repository are regarded as inflight when stopping.
     */
    public boolean isCompleteAllOnStop() {
        return completeAllOnStop;
    }
903
    /**
     * Gets the handler invoked when processing an aggregated exchange fails.
     */
    public ExceptionHandler getExceptionHandler() {
        return exceptionHandler;
    }
907
    /**
     * Sets the handler invoked when processing an aggregated exchange fails.
     */
    public void setExceptionHandler(ExceptionHandler exceptionHandler) {
        this.exceptionHandler = exceptionHandler;
    }
911
    /**
     * Whether completed exchanges are sent out in parallel.
     */
    public boolean isParallelProcessing() {
        return parallelProcessing;
    }
915
    /**
     * Sets whether completed exchanges are sent out in parallel.
     */
    public void setParallelProcessing(boolean parallelProcessing) {
        this.parallelProcessing = parallelProcessing;
    }
919
    /**
     * Whether optimistic locking is enabled (requires an {@link OptimisticLockingAggregationRepository}).
     */
    public boolean isOptimisticLocking() {
        return optimisticLocking;
    }
923
    /**
     * Sets whether optimistic locking is enabled (requires an {@link OptimisticLockingAggregationRepository}).
     */
    public void setOptimisticLocking(boolean optimisticLocking) {
        this.optimisticLocking = optimisticLocking;
    }
927
    /**
     * Gets the repository where partly aggregated exchanges are stored.
     */
    public AggregationRepository getAggregationRepository() {
        return aggregationRepository;
    }
931
    /**
     * Sets the repository where partly aggregated exchanges are stored.
     */
    public void setAggregationRepository(AggregationRepository aggregationRepository) {
        this.aggregationRepository = aggregationRepository;
    }
935
    /**
     * Whether an aggregated exchange completed by timeout is discarded instead of being sent out.
     */
    public boolean isDiscardOnCompletionTimeout() {
        return discardOnCompletionTimeout;
    }
939
    /**
     * Sets whether an aggregated exchange completed by timeout is discarded instead of being sent out.
     */
    public void setDiscardOnCompletionTimeout(boolean discardOnCompletionTimeout) {
        this.discardOnCompletionTimeout = discardOnCompletionTimeout;
    }
943
    /**
     * Sets whether pending aggregated exchanges are forced to complete when this processor is stopped.
     */
    public void setForceCompletionOnStop(boolean forceCompletionOnStop) {
        this.forceCompletionOnStop = forceCompletionOnStop;
    }
947
    /**
     * Sets whether all pending exchanges in the repository are regarded as inflight when stopping.
     */
    public void setCompleteAllOnStop(boolean completeAllOnStop) {
        this.completeAllOnStop = completeAllOnStop;
    }
951
    /**
     * Sets the scheduled executor used by the timeout/interval checker tasks.
     */
    public void setTimeoutCheckerExecutorService(ScheduledExecutorService timeoutCheckerExecutorService) {
        this.timeoutCheckerExecutorService = timeoutCheckerExecutorService;
    }
955
    /**
     * Gets the scheduled executor used by the timeout/interval checker tasks.
     */
    public ScheduledExecutorService getTimeoutCheckerExecutorService() {
        return timeoutCheckerExecutorService;
    }
959
    /**
     * Whether the timeout checker executor should be shut down by this processor
     * (true when the processor created the executor itself).
     */
    public boolean isShutdownTimeoutCheckerExecutorService() {
        return shutdownTimeoutCheckerExecutorService;
    }
963
    /**
     * Sets whether the timeout checker executor should be shut down by this processor.
     */
    public void setShutdownTimeoutCheckerExecutorService(boolean shutdownTimeoutCheckerExecutorService) {
        this.shutdownTimeoutCheckerExecutorService = shutdownTimeoutCheckerExecutorService;
    }
967
    /**
     * Sets the retry policy used when optimistic locking fails.
     */
    public void setOptimisticLockRetryPolicy(OptimisticLockRetryPolicy optimisticLockRetryPolicy) {
        this.optimisticLockRetryPolicy = optimisticLockRetryPolicy;
    }
971
    /**
     * Gets the retry policy used when optimistic locking fails.
     */
    public OptimisticLockRetryPolicy getOptimisticLockRetryPolicy() {
        return optimisticLockRetryPolicy;
    }
975
    /**
     * Gets the strategy used to aggregate exchanges together.
     */
    public AggregationStrategy getAggregationStrategy() {
        return aggregationStrategy;
    }
979
    /**
     * Sets the strategy used to aggregate exchanges together.
     */
    public void setAggregationStrategy(AggregationStrategy aggregationStrategy) {
        this.aggregationStrategy = aggregationStrategy;
    }
983
    /**
     * Gets the expression used to compute the correlation key of an exchange.
     */
    public Expression getCorrelationExpression() {
        return correlationExpression;
    }
987
    /**
     * Sets the expression used to compute the correlation key of an exchange.
     */
    public void setCorrelationExpression(Expression correlationExpression) {
        this.correlationExpression = correlationExpression;
    }
991
    /**
     * Gets the controller used to control this aggregator externally.
     */
    public AggregateController getAggregateController() {
        return aggregateController;
    }
995
    /**
     * Sets the controller used to control this aggregator externally.
     */
    public void setAggregateController(AggregateController aggregateController) {
        this.aggregateController = aggregateController;
    }
999
1000    /**
1001     * On completion task which keeps the booking of the in progress up to date
1002     */
1003    private final class AggregateOnCompletion implements Synchronization {
1004        private final String exchangeId;
1005
1006        private AggregateOnCompletion(String exchangeId) {
1007            // must use the original exchange id as it could potentially change if send over SEDA etc.
1008            this.exchangeId = exchangeId;
1009        }
1010
1011        public void onFailure(Exchange exchange) {
1012            LOG.trace("Aggregated exchange onFailure: {}", exchange);
1013
1014            // must remember to remove in progress when we failed
1015            inProgressCompleteExchanges.remove(exchangeId);
1016            // do not remove redelivery state as we need it when we redeliver again later
1017        }
1018
1019        public void onComplete(Exchange exchange) {
1020            LOG.trace("Aggregated exchange onComplete: {}", exchange);
1021
1022            // only confirm if we processed without a problem
1023            try {
1024                aggregationRepository.confirm(exchange.getContext(), exchangeId);
1025                // and remove redelivery state as well
1026                redeliveryState.remove(exchangeId);
1027            } finally {
1028                // must remember to remove in progress when we are complete
1029                inProgressCompleteExchanges.remove(exchangeId);
1030            }
1031        }
1032
1033        @Override
1034        public String toString() {
1035            return "AggregateOnCompletion";
1036        }
1037    }
1038
1039    /**
1040     * Background task that looks for aggregated exchanges which is triggered by completion timeouts.
1041     */
1042    private final class AggregationTimeoutMap extends DefaultTimeoutMap<String, String> {
1043
1044        private AggregationTimeoutMap(ScheduledExecutorService executor, long requestMapPollTimeMillis) {
1045            // do NOT use locking on the timeout map as this aggregator has its own shared lock we will use instead
1046            super(executor, requestMapPollTimeMillis, optimisticLocking);
1047        }
1048
1049        @Override
1050        public void purge() {
1051            // must acquire the shared aggregation lock to be able to purge
1052            if (!optimisticLocking) {
1053                lock.lock();
1054            }
1055            try {
1056                super.purge();
1057            } finally {
1058                if (!optimisticLocking) {
1059                    lock.unlock();
1060                }
1061            }
1062        }
1063
1064        @Override
1065        public boolean onEviction(String key, String exchangeId) {
1066            log.debug("Completion timeout triggered for correlation key: {}", key);
1067
1068            boolean inProgress = inProgressCompleteExchanges.contains(exchangeId);
1069            if (inProgress) {
1070                LOG.trace("Aggregated exchange with id: {} is already in progress.", exchangeId);
1071                return true;
1072            }
1073
1074            // get the aggregated exchange
1075            boolean evictionStolen = false;
1076            Exchange answer = aggregationRepository.get(camelContext, key);
1077            if (answer == null) {
1078                evictionStolen = true;
1079            } else {
1080                // indicate it was completed by timeout
1081                answer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "timeout");
1082                try {
1083                    answer = onCompletion(key, answer, answer, true);
1084                    if (answer != null) {
1085                        onSubmitCompletion(key, answer);
1086                    }
1087                } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
1088                    evictionStolen = true;
1089                }
1090            }
1091
1092            if (optimisticLocking && evictionStolen) {
1093                LOG.debug("Another Camel instance has already successfully correlated or processed this timeout eviction "
1094                          + "for exchange with id: {} and correlation id: {}", exchangeId, key);
1095            }
1096            return true;
1097        }
1098    }
1099
1100    /**
1101     * Background task that triggers completion based on interval.
1102     */
1103    private final class AggregationIntervalTask implements Runnable {
1104
1105        public void run() {
1106            // only run if CamelContext has been fully started
1107            if (!camelContext.getStatus().isStarted()) {
1108                LOG.trace("Completion interval task cannot start due CamelContext({}) has not been started yet", camelContext.getName());
1109                return;
1110            }
1111
1112            LOG.trace("Starting completion interval task");
1113
1114            // trigger completion for all in the repository
1115            Set<String> keys = aggregationRepository.getKeys();
1116
1117            if (keys != null && !keys.isEmpty()) {
1118                // must acquire the shared aggregation lock to be able to trigger interval completion
1119                if (!optimisticLocking) {
1120                    lock.lock();
1121                }
1122                try {
1123                    for (String key : keys) {
1124                        boolean stolenInterval = false;
1125                        Exchange exchange = aggregationRepository.get(camelContext, key);
1126                        if (exchange == null) {
1127                            stolenInterval = true;
1128                        } else {
1129                            LOG.trace("Completion interval triggered for correlation key: {}", key);
1130                            // indicate it was completed by interval
1131                            exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "interval");
1132                            try {
1133                                Exchange answer = onCompletion(key, exchange, exchange, false);
1134                                if (answer != null) {
1135                                    onSubmitCompletion(key, answer);
1136                                }
1137                            } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
1138                                stolenInterval = true;
1139                            }
1140                        }
1141                        if (optimisticLocking && stolenInterval) {
1142                            LOG.debug("Another Camel instance has already processed this interval aggregation for exchange with correlation id: {}", key);
1143                        }
1144                    }
1145                } finally {
1146                    if (!optimisticLocking) {
1147                        lock.unlock();
1148                    }
1149                }
1150            }
1151
1152            LOG.trace("Completion interval task complete");
1153        }
1154    }
1155
1156    /**
1157     * Background task that looks for aggregated exchanges to recover.
1158     */
1159    private final class RecoverTask implements Runnable {
1160        private final RecoverableAggregationRepository recoverable;
1161
1162        private RecoverTask(RecoverableAggregationRepository recoverable) {
1163            this.recoverable = recoverable;
1164        }
1165
1166        public void run() {
1167            // only run if CamelContext has been fully started
1168            if (!camelContext.getStatus().isStarted()) {
1169                LOG.trace("Recover check cannot start due CamelContext({}) has not been started yet", camelContext.getName());
1170                return;
1171            }
1172
1173            LOG.trace("Starting recover check");
1174
1175            // copy the current in progress before doing scan
1176            final Set<String> copyOfInProgress = new LinkedHashSet<String>(inProgressCompleteExchanges);
1177
1178            Set<String> exchangeIds = recoverable.scan(camelContext);
1179            for (String exchangeId : exchangeIds) {
1180
1181                // we may shutdown while doing recovery
1182                if (!isRunAllowed()) {
1183                    LOG.info("We are shutting down so stop recovering");
1184                    return;
1185                }
1186
1187                // consider in progress if it was in progress before we did the scan, or currently after we did the scan
1188                // its safer to consider it in progress than risk duplicates due both in progress + recovered
1189                boolean inProgress = copyOfInProgress.contains(exchangeId) || inProgressCompleteExchanges.contains(exchangeId);
1190                if (inProgress) {
1191                    LOG.trace("Aggregated exchange with id: {} is already in progress.", exchangeId);
1192                } else {
1193                    LOG.debug("Loading aggregated exchange with id: {} to be recovered.", exchangeId);
1194                    Exchange exchange = recoverable.recover(camelContext, exchangeId);
1195                    if (exchange != null) {
1196                        // get the correlation key
1197                        String key = exchange.getProperty(Exchange.AGGREGATED_CORRELATION_KEY, String.class);
1198                        // and mark it as redelivered
1199                        exchange.getIn().setHeader(Exchange.REDELIVERED, Boolean.TRUE);
1200
1201                        // get the current redelivery data
1202                        RedeliveryData data = redeliveryState.get(exchange.getExchangeId());
1203
1204                        // if we are exhausted, then move to dead letter channel
1205                        if (data != null && recoverable.getMaximumRedeliveries() > 0 && data.redeliveryCounter >= recoverable.getMaximumRedeliveries()) {
1206                            LOG.warn("The recovered exchange is exhausted after " + recoverable.getMaximumRedeliveries()
1207                                    + " attempts, will now be moved to dead letter channel: " + recoverable.getDeadLetterUri());
1208
1209                            // send to DLC
1210                            try {
1211                                // set redelivery counter
1212                                exchange.getIn().setHeader(Exchange.REDELIVERY_COUNTER, data.redeliveryCounter);
1213                                exchange.getIn().setHeader(Exchange.REDELIVERY_EXHAUSTED, Boolean.TRUE);
1214                                deadLetterProducerTemplate.send(recoverable.getDeadLetterUri(), exchange);
1215                            } catch (Throwable e) {
1216                                exchange.setException(e);
1217                            }
1218
1219                            // handle if failed
1220                            if (exchange.getException() != null) {
1221                                getExceptionHandler().handleException("Failed to move recovered Exchange to dead letter channel: " + recoverable.getDeadLetterUri(), exchange.getException());
1222                            } else {
1223                                // it was ok, so confirm after it has been moved to dead letter channel, so we wont recover it again
1224                                recoverable.confirm(camelContext, exchangeId);
1225                            }
1226                        } else {
1227                            // update current redelivery state
1228                            if (data == null) {
1229                                // create new data
1230                                data = new RedeliveryData();
1231                                redeliveryState.put(exchange.getExchangeId(), data);
1232                            }
1233                            data.redeliveryCounter++;
1234
1235                            // set redelivery counter
1236                            exchange.getIn().setHeader(Exchange.REDELIVERY_COUNTER, data.redeliveryCounter);
1237                            if (recoverable.getMaximumRedeliveries() > 0) {
1238                                exchange.getIn().setHeader(Exchange.REDELIVERY_MAX_COUNTER, recoverable.getMaximumRedeliveries());
1239                            }
1240
1241                            LOG.debug("Delivery attempt: {} to recover aggregated exchange with id: {}", data.redeliveryCounter, exchangeId);
1242
1243                            // not exhaust so resubmit the recovered exchange
1244                            onSubmitCompletion(key, exchange);
1245                        }
1246                    }
1247                }
1248            }
1249
1250            LOG.trace("Recover check complete");
1251        }
1252    }
1253
    /**
     * Starts this aggregator: validates the completion configuration, sets up the closed correlation
     * key cache, defaults the repository, and schedules the recover, interval and timeout background tasks.
     */
    @Override
    protected void doStart() throws Exception {
        // a pre-completion aware strategy switches the aggregator into pre-completion mode,
        // where the completion options below are not required
        if (aggregationStrategy instanceof PreCompletionAwareAggregationStrategy) {
            preCompletion = true;
            LOG.info("PreCompletionAwareAggregationStrategy detected. Aggregator {} is in pre-completion mode.", getId());
        }

        if (!preCompletion) {
            // if not in pre completion mode then check we configured the completion required
            if (getCompletionTimeout() <= 0 && getCompletionInterval() <= 0 && getCompletionSize() <= 0 && getCompletionPredicate() == null
                    && !isCompletionFromBatchConsumer() && getCompletionTimeoutExpression() == null
                    && getCompletionSizeExpression() == null) {
                throw new IllegalStateException("At least one of the completions options"
                        + " [completionTimeout, completionInterval, completionSize, completionPredicate, completionFromBatchConsumer] must be set");
            }
        }

        // positive capacity = bounded LRU cache, otherwise unbounded cache
        if (getCloseCorrelationKeyOnCompletion() != null) {
            if (getCloseCorrelationKeyOnCompletion() > 0) {
                LOG.info("Using ClosedCorrelationKeys with a LRUCache with a capacity of " + getCloseCorrelationKeyOnCompletion());
                closedCorrelationKeys = new LRUCache<String, String>(getCloseCorrelationKeyOnCompletion());
            } else {
                LOG.info("Using ClosedCorrelationKeys with unbounded capacity");
                closedCorrelationKeys = new ConcurrentHashMap<String, String>();
            }
        }

        if (aggregationRepository == null) {
            aggregationRepository = new MemoryAggregationRepository(optimisticLocking);
            LOG.info("Defaulting to MemoryAggregationRepository");
        }

        if (optimisticLocking) {
            // optimistic locking only works with repositories that support compare-and-set semantics
            if (!(aggregationRepository instanceof OptimisticLockingAggregationRepository)) {
                throw new IllegalArgumentException("Optimistic locking cannot be enabled without using an AggregationRepository that implements OptimisticLockingAggregationRepository");
            }
            LOG.info("Optimistic locking is enabled");
        }

        ServiceHelper.startServices(aggregationStrategy, processor, aggregationRepository);

        // should we use recover checker
        if (aggregationRepository instanceof RecoverableAggregationRepository) {
            RecoverableAggregationRepository recoverable = (RecoverableAggregationRepository) aggregationRepository;
            if (recoverable.isUseRecovery()) {
                long interval = recoverable.getRecoveryIntervalInMillis();
                if (interval <= 0) {
                    throw new IllegalArgumentException("AggregationRepository has recovery enabled and the RecoveryInterval option must be a positive number, was: " + interval);
                }

                // create a background recover thread to check every interval
                recoverService = camelContext.getExecutorServiceManager().newScheduledThreadPool(this, "AggregateRecoverChecker", 1);
                Runnable recoverTask = new RecoverTask(recoverable);
                LOG.info("Using RecoverableAggregationRepository by scheduling recover checker to run every " + interval + " millis.");
                // use fixed delay so there is X interval between each run
                recoverService.scheduleWithFixedDelay(recoverTask, 1000L, interval, TimeUnit.MILLISECONDS);

                if (recoverable.getDeadLetterUri() != null) {
                    int max = recoverable.getMaximumRedeliveries();
                    if (max <= 0) {
                        throw new IllegalArgumentException("Option maximumRedeliveries must be a positive number, was: " + max);
                    }
                    LOG.info("After " + max + " failed redelivery attempts Exchanges will be moved to deadLetterUri: " + recoverable.getDeadLetterUri());

                    // dead letter uri must be a valid endpoint
                    Endpoint endpoint = camelContext.getEndpoint(recoverable.getDeadLetterUri());
                    if (endpoint == null) {
                        throw new NoSuchEndpointException(recoverable.getDeadLetterUri());
                    }
                    deadLetterProducerTemplate = camelContext.createProducerTemplate();
                }
            }
        }

        if (getCompletionInterval() > 0 && getCompletionTimeout() > 0) {
            throw new IllegalArgumentException("Only one of completionInterval or completionTimeout can be used, not both.");
        }
        if (getCompletionInterval() > 0) {
            LOG.info("Using CompletionInterval to run every " + getCompletionInterval() + " millis.");
            if (getTimeoutCheckerExecutorService() == null) {
                setTimeoutCheckerExecutorService(camelContext.getExecutorServiceManager().newScheduledThreadPool(this, AGGREGATE_TIMEOUT_CHECKER, 1));
                // remember we created the executor so we also shut it down
                shutdownTimeoutCheckerExecutorService = true;
            }
            // trigger completion based on interval
            getTimeoutCheckerExecutorService().scheduleAtFixedRate(new AggregationIntervalTask(), getCompletionInterval(), getCompletionInterval(), TimeUnit.MILLISECONDS);
        }

        // start timeout service if its in use
        if (getCompletionTimeout() > 0 || getCompletionTimeoutExpression() != null) {
            LOG.info("Using CompletionTimeout to trigger after " + getCompletionTimeout() + " millis of inactivity.");
            if (getTimeoutCheckerExecutorService() == null) {
                setTimeoutCheckerExecutorService(camelContext.getExecutorServiceManager().newScheduledThreadPool(this, AGGREGATE_TIMEOUT_CHECKER, 1));
                // remember we created the executor so we also shut it down
                shutdownTimeoutCheckerExecutorService = true;
            }
            // check for timed out aggregated messages once every second
            timeoutMap = new AggregationTimeoutMap(getTimeoutCheckerExecutorService(), 1000L);
            // fill in existing timeout values from the aggregation repository, for example if a restart occurred, then we
            // need to re-establish the timeout map so timeout can trigger
            restoreTimeoutMapFromAggregationRepository();
            ServiceHelper.startService(timeoutMap);
        }

        if (aggregateController == null) {
            aggregateController = new DefaultAggregateController();
        }
        aggregateController.onStart(this);
    }
1361
    /**
     * Stops this aggregator: detaches the controller, shuts down the recover checker and
     * stops the timeout map, processor and dead letter producer, then clears internal state.
     */
    @Override
    protected void doStop() throws Exception {
        // note: we cannot do doForceCompletionOnStop from this doStop method
        // as this is handled in the prepareShutdown method which is also invoked when stopping a route
        // and is better suited for preparing to shutdown than this doStop method is

        if (aggregateController != null) {
            aggregateController.onStop(this);
        }

        if (recoverService != null) {
            camelContext.getExecutorServiceManager().shutdown(recoverService);
        }
        ServiceHelper.stopServices(timeoutMap, processor, deadLetterProducerTemplate);

        if (closedCorrelationKeys != null) {
            // it may be a service so stop it as well
            ServiceHelper.stopService(closedCorrelationKeys);
            closedCorrelationKeys.clear();
        }
        batchConsumerCorrelationKeys.clear();
        redeliveryState.clear();
    }
1385
1386    @Override
1387    public void prepareShutdown(boolean suspendOnly, boolean forced) {
1388        // we are shutting down, so force completion if this option was enabled
1389        // but only do this when forced=false, as that is when we have chance to
1390        // send out new messages to be routed by Camel. When forced=true, then
1391        // we have to shutdown in a hurry
1392        if (!forced && forceCompletionOnStop) {
1393            doForceCompletionOnStop();
1394        }
1395    }
1396
    @Override
    public boolean deferShutdown(ShutdownRunningTask shutdownRunningTask) {
        // not in use: this aggregator does not need to defer shutdown based on
        // the running-task policy, so always answer true
        return true;
    }
1402
1403    @Override
1404    public int getPendingExchangesSize() {
1405        if (completeAllOnStop) {
1406            // we want to regard all pending exchanges in the repo as inflight
1407            Set<String> keys = getAggregationRepository().getKeys();
1408            return keys != null ? keys.size() : 0;
1409        } else {
1410            return 0;
1411        }
1412    }
1413
1414    private void doForceCompletionOnStop() {
1415        int expected = forceCompletionOfAllGroups();
1416
1417        StopWatch watch = new StopWatch();
1418        while (inProgressCompleteExchanges.size() > 0) {
1419            LOG.trace("Waiting for {} inflight exchanges to complete", inProgressCompleteExchanges.size());
1420            try {
1421                Thread.sleep(100);
1422            } catch (InterruptedException e) {
1423                // break out as we got interrupted such as the JVM terminating
1424                LOG.warn("Interrupted while waiting for {} inflight exchanges to complete.", inProgressCompleteExchanges.size());
1425                break;
1426            }
1427        }
1428
1429        if (expected > 0) {
1430            LOG.info("Forcing completion of all groups with {} exchanges completed in {}", expected, TimeUtils.printDuration(watch.stop()));
1431        }
1432    }
1433
    @Override
    protected void doShutdown() throws Exception {
        // shutdown aggregation repository and the strategy
        ServiceHelper.stopAndShutdownServices(aggregationRepository, aggregationStrategy);

        // cleanup when shutting down
        inProgressCompleteExchanges.clear();

        // only shut down pools this processor created itself (tracked by the shutdown* flags);
        // externally provided executor services are left for their owner to manage
        if (shutdownExecutorService) {
            camelContext.getExecutorServiceManager().shutdownNow(executorService);
        }
        if (shutdownTimeoutCheckerExecutorService) {
            camelContext.getExecutorServiceManager().shutdownNow(timeoutCheckerExecutorService);
            // null the reference so a later restart creates a fresh pool
            timeoutCheckerExecutorService = null;
        }

        super.doShutdown();
    }
1452
1453    public int forceCompletionOfGroup(String key) {
1454        // must acquire the shared aggregation lock to be able to trigger force completion
1455        int total = 0;
1456
1457        if (!optimisticLocking) {
1458            lock.lock();
1459        }
1460        try {
1461            Exchange exchange = aggregationRepository.get(camelContext, key);
1462            if (exchange != null) {
1463                total = 1;
1464                LOG.trace("Force completion triggered for correlation key: {}", key);
1465                // indicate it was completed by a force completion request
1466                exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "force");
1467                Exchange answer = onCompletion(key, exchange, exchange, false);
1468                if (answer != null) {
1469                    onSubmitCompletion(key, answer);
1470                }
1471            }
1472        } finally {
1473            if (!optimisticLocking) {
1474                lock.unlock(); 
1475            }
1476        }
1477        LOG.trace("Completed force completion of group {}", key);
1478
1479        if (total > 0) {
1480            LOG.debug("Forcing completion of group {} with {} exchanges", key, total);
1481        }
1482        return total;
1483    }
1484
1485    public int forceCompletionOfAllGroups() {
1486
1487        // only run if CamelContext has been fully started or is stopping
1488        boolean allow = camelContext.getStatus().isStarted() || camelContext.getStatus().isStopping();
1489        if (!allow) {
1490            LOG.warn("Cannot start force completion of all groups because CamelContext({}) has not been started", camelContext.getName());
1491            return 0;
1492        }
1493
1494        LOG.trace("Starting force completion of all groups task");
1495
1496        // trigger completion for all in the repository
1497        Set<String> keys = aggregationRepository.getKeys();
1498
1499        int total = 0;
1500        if (keys != null && !keys.isEmpty()) {
1501            // must acquire the shared aggregation lock to be able to trigger force completion
1502            if (!optimisticLocking) {
1503                lock.lock(); 
1504            }
1505            total = keys.size();
1506            try {
1507                for (String key : keys) {
1508                    Exchange exchange = aggregationRepository.get(camelContext, key);
1509                    if (exchange != null) {
1510                        LOG.trace("Force completion triggered for correlation key: {}", key);
1511                        // indicate it was completed by a force completion request
1512                        exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "force");
1513                        Exchange answer = onCompletion(key, exchange, exchange, false);
1514                        if (answer != null) {
1515                            onSubmitCompletion(key, answer);
1516                        }
1517                    }
1518                }
1519            } finally {
1520                if (!optimisticLocking) {
1521                    lock.unlock();
1522                }
1523            }
1524        }
1525        LOG.trace("Completed force completion of all groups task");
1526
1527        if (total > 0) {
1528            LOG.debug("Forcing completion of all groups with {} exchanges", total);
1529        }
1530        return total;
1531    }
1532
1533}