001/**
002 * Licensed to the Apache Software Foundation (ASF) under one or more
003 * contributor license agreements.  See the NOTICE file distributed with
004 * this work for additional information regarding copyright ownership.
005 * The ASF licenses this file to You under the Apache License, Version 2.0
006 * (the "License"); you may not use this file except in compliance with
007 * the License.  You may obtain a copy of the License at
008 *
009 *      http://www.apache.org/licenses/LICENSE-2.0
010 *
011 * Unless required by applicable law or agreed to in writing, software
012 * distributed under the License is distributed on an "AS IS" BASIS,
013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
014 * See the License for the specific language governing permissions and
015 * limitations under the License.
016 */
017package org.apache.camel.processor.aggregate;
018
019import java.util.ArrayList;
020import java.util.Collections;
021import java.util.LinkedHashSet;
022import java.util.List;
023import java.util.Map;
024import java.util.Set;
025import java.util.concurrent.ConcurrentHashMap;
026import java.util.concurrent.ConcurrentSkipListSet;
027import java.util.concurrent.ExecutorService;
028import java.util.concurrent.ScheduledExecutorService;
029import java.util.concurrent.TimeUnit;
030import java.util.concurrent.atomic.AtomicBoolean;
031import java.util.concurrent.atomic.AtomicInteger;
032import java.util.concurrent.atomic.AtomicLong;
033import java.util.concurrent.locks.Lock;
034import java.util.concurrent.locks.ReentrantLock;
035
036import org.apache.camel.AsyncCallback;
037import org.apache.camel.AsyncProcessor;
038import org.apache.camel.CamelContext;
039import org.apache.camel.CamelContextAware;
040import org.apache.camel.CamelExchangeException;
041import org.apache.camel.Endpoint;
042import org.apache.camel.Exchange;
043import org.apache.camel.Expression;
044import org.apache.camel.Navigate;
045import org.apache.camel.NoSuchEndpointException;
046import org.apache.camel.Predicate;
047import org.apache.camel.Processor;
048import org.apache.camel.ProducerTemplate;
049import org.apache.camel.ShutdownRunningTask;
050import org.apache.camel.TimeoutMap;
051import org.apache.camel.Traceable;
052import org.apache.camel.spi.AggregationRepository;
053import org.apache.camel.spi.ExceptionHandler;
054import org.apache.camel.spi.IdAware;
055import org.apache.camel.spi.OptimisticLockingAggregationRepository;
056import org.apache.camel.spi.RecoverableAggregationRepository;
057import org.apache.camel.spi.ShutdownAware;
058import org.apache.camel.spi.ShutdownPrepared;
059import org.apache.camel.spi.Synchronization;
060import org.apache.camel.support.DefaultTimeoutMap;
061import org.apache.camel.support.LoggingExceptionHandler;
062import org.apache.camel.support.ServiceSupport;
063import org.apache.camel.util.AsyncProcessorHelper;
064import org.apache.camel.util.ExchangeHelper;
065import org.apache.camel.util.LRUCache;
066import org.apache.camel.util.ObjectHelper;
067import org.apache.camel.util.ServiceHelper;
068import org.apache.camel.util.StopWatch;
069import org.apache.camel.util.TimeUtils;
070import org.slf4j.Logger;
071import org.slf4j.LoggerFactory;
072
073/**
074 * An implementation of the <a
075 * href="http://camel.apache.org/aggregator2.html">Aggregator</a>
076 * pattern where a batch of messages are processed (up to a maximum amount or
077 * until some timeout is reached) and messages for the same correlation key are
078 * combined together using some kind of {@link AggregationStrategy}
079 * (by default the latest message is used) to compress many message exchanges
080 * into a smaller number of exchanges.
081 * <p/>
082 * A good example of this is stock market data; you may be receiving 30,000
083 * messages/second and you may want to throttle it right down so that multiple
084 * messages for the same stock are combined (or just the latest message is used
085 * and older prices are discarded). Another idea is to combine line item messages
086 * together into a single invoice message.
087 */
088public class AggregateProcessor extends ServiceSupport implements AsyncProcessor, Navigate<Processor>, Traceable, ShutdownPrepared, ShutdownAware, IdAware {
089
    public static final String AGGREGATE_TIMEOUT_CHECKER = "AggregateTimeoutChecker";

    private static final Logger LOG = LoggerFactory.getLogger(AggregateProcessor.class);

    // guards doAggregation when optimistic locking is not in use
    private final Lock lock = new ReentrantLock();
    // ensures the RecoverableAggregationRepository misuse warning is logged at most once
    private final AtomicBoolean aggregateRepositoryWarned = new AtomicBoolean();
    private final CamelContext camelContext;
    // the downstream processor completed (aggregated) exchanges are sent to
    private final Processor processor;
    private String id;
    private AggregationStrategy aggregationStrategy;
    private boolean preCompletion;
    // expression used to compute the correlation key of each incoming exchange
    private Expression correlationExpression;
    private AggregateController aggregateController;
    private final ExecutorService executorService;
    private final boolean shutdownExecutorService;
    private OptimisticLockRetryPolicy optimisticLockRetryPolicy = new OptimisticLockRetryPolicy();
    private ScheduledExecutorService timeoutCheckerExecutorService;
    private boolean shutdownTimeoutCheckerExecutorService;
    private ScheduledExecutorService recoverService;
    // store correlation key -> exchange id in timeout map
    private TimeoutMap<String, String> timeoutMap;
    private ExceptionHandler exceptionHandler;
    private AggregationRepository aggregationRepository;
    // correlation keys which have completed and are closed for further aggregation
    // (only used when closeCorrelationKeyOnCompletion is configured)
    private Map<String, String> closedCorrelationKeys;
    // correlation keys seen for the current batch (completion from batch consumer)
    private final Set<String> batchConsumerCorrelationKeys = new ConcurrentSkipListSet<String>();
    // exchange ids of completed aggregates currently being processed downstream
    private final Set<String> inProgressCompleteExchanges = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
    // exchange id -> redelivery bookkeeping used during recovery
    private final Map<String, RedeliveryData> redeliveryState = new ConcurrentHashMap<String, RedeliveryData>();

    // statistics counters (only incremented when statistics is enabled)
    private final AggregateProcessorStatistics statistics = new Statistics();
    private final AtomicLong totalIn = new AtomicLong();
    private final AtomicLong totalCompleted = new AtomicLong();
    private final AtomicLong completedBySize = new AtomicLong();
    private final AtomicLong completedByStrategy = new AtomicLong();
    private final AtomicLong completedByInterval = new AtomicLong();
    private final AtomicLong completedByTimeout = new AtomicLong();
    private final AtomicLong completedByPredicate = new AtomicLong();
    private final AtomicLong completedByBatchConsumer = new AtomicLong();
    private final AtomicLong completedByForce = new AtomicLong();
128
129    // keep booking about redelivery
130    private class RedeliveryData {
131        int redeliveryCounter;
132    }
133
134    private class Statistics implements AggregateProcessorStatistics {
135
136        private boolean statisticsEnabled = true;
137
138        public long getTotalIn() {
139            return totalIn.get();
140        }
141
142        public long getTotalCompleted() {
143            return totalCompleted.get();
144        }
145
146        public long getCompletedBySize() {
147            return completedBySize.get();
148        }
149
150        public long getCompletedByStrategy() {
151            return completedByStrategy.get();
152        }
153
154        public long getCompletedByInterval() {
155            return completedByInterval.get();
156        }
157
158        public long getCompletedByTimeout() {
159            return completedByTimeout.get();
160        }
161
162        public long getCompletedByPredicate() {
163            return completedByPredicate.get();
164        }
165
166        public long getCompletedByBatchConsumer() {
167            return completedByBatchConsumer.get();
168        }
169
170        public long getCompletedByForce() {
171            return completedByForce.get();
172        }
173
174        public void reset() {
175            totalIn.set(0);
176            totalCompleted.set(0);
177            completedBySize.set(0);
178            completedByStrategy.set(0);
179            completedByTimeout.set(0);
180            completedByPredicate.set(0);
181            completedByBatchConsumer.set(0);
182            completedByForce.set(0);
183        }
184
185        public boolean isStatisticsEnabled() {
186            return statisticsEnabled;
187        }
188
189        public void setStatisticsEnabled(boolean statisticsEnabled) {
190            this.statisticsEnabled = statisticsEnabled;
191        }
192    }
193
    // options
    private boolean ignoreInvalidCorrelationKeys;
    // when set, completed correlation keys are closed for this many entries (LRU)
    private Integer closeCorrelationKeyOnCompletion;
    private boolean parallelProcessing;
    private boolean optimisticLocking;

    // different ways to have completion triggered
    private boolean eagerCheckCompletion;
    private Predicate completionPredicate;
    private long completionTimeout;
    // dynamic timeout expression; takes precedence over the fixed completionTimeout
    private Expression completionTimeoutExpression;
    private long completionInterval;
    private int completionSize;
    // dynamic size expression; takes precedence over the fixed completionSize
    private Expression completionSizeExpression;
    private boolean completionFromBatchConsumer;
    // counts exchanges received from the current batch (reset when the batch completes)
    private AtomicInteger batchConsumerCounter = new AtomicInteger();
    private boolean discardOnCompletionTimeout;
    private boolean forceCompletionOnStop;
    private boolean completeAllOnStop;

    // used for sending exchanges to a dead letter channel during recovery
    private ProducerTemplate deadLetterProducerTemplate;
215
    /**
     * Creates this aggregate processor.
     *
     * @param camelContext the camel context (must not be <tt>null</tt>)
     * @param processor the downstream processor to send completed aggregated exchanges to
     * @param correlationExpression the expression used to compute the correlation key
     * @param aggregationStrategy the strategy used to combine exchanges
     * @param executorService the executor service used by this processor
     *        (presumably for sending out completed exchanges - confirm against the rest of the file)
     * @param shutdownExecutorService whether this processor should shut down the executor service when stopped
     */
    public AggregateProcessor(CamelContext camelContext, Processor processor,
                              Expression correlationExpression, AggregationStrategy aggregationStrategy,
                              ExecutorService executorService, boolean shutdownExecutorService) {
        ObjectHelper.notNull(camelContext, "camelContext");
        ObjectHelper.notNull(processor, "processor");
        ObjectHelper.notNull(correlationExpression, "correlationExpression");
        ObjectHelper.notNull(aggregationStrategy, "aggregationStrategy");
        ObjectHelper.notNull(executorService, "executorService");
        this.camelContext = camelContext;
        this.processor = processor;
        this.correlationExpression = correlationExpression;
        this.aggregationStrategy = aggregationStrategy;
        this.executorService = executorService;
        this.shutdownExecutorService = shutdownExecutorService;
        this.exceptionHandler = new LoggingExceptionHandler(camelContext, getClass());
    }
232
233    @Override
234    public String toString() {
235        return "AggregateProcessor[to: " + processor + "]";
236    }
237
238    public String getTraceLabel() {
239        return "aggregate[" + correlationExpression + "]";
240    }
241
242    public List<Processor> next() {
243        if (!hasNext()) {
244            return null;
245        }
246        List<Processor> answer = new ArrayList<Processor>(1);
247        answer.add(processor);
248        return answer;
249    }
250
251    public boolean hasNext() {
252        return processor != null;
253    }
254
255    public String getId() {
256        return id;
257    }
258
259    public void setId(String id) {
260        this.id = id;
261    }
262
    public void process(Exchange exchange) throws Exception {
        // bridge the synchronous API to the asynchronous implementation below
        AsyncProcessorHelper.process(this, exchange);
    }
266
267    public boolean process(Exchange exchange, AsyncCallback callback) {
268        try {
269            doProcess(exchange);
270        } catch (Throwable e) {
271            exchange.setException(e);
272        }
273        callback.done(true);
274        return true;
275    }
276
    /**
     * Performs aggregation of the incoming exchange.
     * <p/>
     * Evaluates the correlation key, handles the force-completion headers, and then
     * aggregates the exchange either with optimistic locking retries or under the
     * shared lock. Any completed aggregates are submitted for downstream processing
     * outside the lock.
     *
     * @param exchange the incoming exchange
     * @throws Exception if the correlation key is invalid (and not ignored), the key is
     *                   already closed, optimistic locking retries are exhausted, or aggregation fails
     */
    protected void doProcess(Exchange exchange) throws Exception {

        if (getStatistics().isStatisticsEnabled()) {
            totalIn.incrementAndGet();
        }

        //check for the special header to force completion of all groups (and ignore the exchange otherwise)
        boolean completeAllGroups = exchange.getIn().getHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS, false, boolean.class);
        if (completeAllGroups) {
            forceCompletionOfAllGroups();
            return;
        }

        // compute correlation expression
        String key = correlationExpression.evaluate(exchange, String.class);
        if (ObjectHelper.isEmpty(key)) {
            // we have a bad correlation key
            if (isIgnoreInvalidCorrelationKeys()) {
                LOG.debug("Invalid correlation key. This Exchange will be ignored: {}", exchange);
                return;
            } else {
                throw new CamelExchangeException("Invalid correlation key", exchange);
            }
        }

        // is the correlation key closed?
        if (closedCorrelationKeys != null && closedCorrelationKeys.containsKey(key)) {
            throw new ClosedCorrelationKeyException(key, exchange);
        }

        // when optimist locking is enabled we keep trying until we succeed
        if (optimisticLocking) {
            List<Exchange> aggregated = null;
            boolean exhaustedRetries = true;
            int attempt = 0;
            do {
                attempt++;
                // copy exchange, and do not share the unit of work
                // the aggregated output runs in another unit of work
                Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);
                try {
                    aggregated = doAggregation(key, copy);
                    exhaustedRetries = false;
                    break;
                } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
                    LOG.trace("On attempt {} OptimisticLockingAggregationRepository: {} threw OptimisticLockingException while trying to add() key: {} and exchange: {}",
                              new Object[]{attempt, aggregationRepository, key, copy, e});
                    // back off according to the retry policy before trying again
                    optimisticLockRetryPolicy.doDelay(attempt);
                }
            } while (optimisticLockRetryPolicy.shouldRetry(attempt));

            if (exhaustedRetries) {
                throw new CamelExchangeException("Exhausted optimistic locking retry attempts, tried " + attempt + " times", exchange,
                        new OptimisticLockingAggregationRepository.OptimisticLockingException());
            } else if (aggregated != null) {
                // we are completed so submit to completion
                for (Exchange agg : aggregated) {
                    onSubmitCompletion(key, agg);
                }
            }
        } else {
            // copy exchange, and do not share the unit of work
            // the aggregated output runs in another unit of work
            Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);

            // when memory based then its fast using synchronized, but if the aggregation repository is IO
            // bound such as JPA etc then concurrent aggregation per correlation key could
            // improve performance as we can run aggregation repository get/add in parallel
            List<Exchange> aggregated = null;
            lock.lock();
            try {
                aggregated = doAggregation(key, copy);
            } finally {
                lock.unlock();
            }

            // we are completed so do that work outside the lock
            if (aggregated != null) {
                for (Exchange agg : aggregated) {
                    onSubmitCompletion(key, agg);
                }
            }
        }

        // check for the special header to force completion of all groups (inclusive of the message)
        boolean completeAllGroupsInclusive = exchange.getIn().getHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS_INCLUSIVE, false, boolean.class);
        if (completeAllGroupsInclusive) {
            forceCompletionOfAllGroups();
        }
    }
367
368    /**
369     * Aggregates the exchange with the given correlation key
370     * <p/>
371     * This method <b>must</b> be run synchronized as we cannot aggregate the same correlation key
372     * in parallel.
373     * <p/>
374     * The returned {@link Exchange} should be send downstream using the {@link #onSubmitCompletion(String, org.apache.camel.Exchange)}
375     * method which sends out the aggregated and completed {@link Exchange}.
376     *
377     * @param key      the correlation key
378     * @param newExchange the exchange
379     * @return the aggregated exchange(s) which is complete, or <tt>null</tt> if not yet complete
380     * @throws org.apache.camel.CamelExchangeException is thrown if error aggregating
381     */
382    private List<Exchange> doAggregation(String key, Exchange newExchange) throws CamelExchangeException {
383        LOG.trace("onAggregation +++ start +++ with correlation key: {}", key);
384
385        List<Exchange> list = new ArrayList<Exchange>();
386        String complete = null;
387
388        Exchange answer;
389        Exchange originalExchange = aggregationRepository.get(newExchange.getContext(), key);
390        Exchange oldExchange = originalExchange;
391
392        Integer size = 1;
393        if (oldExchange != null) {
394            // hack to support legacy AggregationStrategy's that modify and return the oldExchange, these will not
395            // working when using an identify based approach for optimistic locking like the MemoryAggregationRepository.
396            if (optimisticLocking && aggregationRepository instanceof MemoryAggregationRepository) {
397                oldExchange = originalExchange.copy();
398            }
399            size = oldExchange.getProperty(Exchange.AGGREGATED_SIZE, 0, Integer.class);
400            size++;
401        }
402
403        // prepare the exchanges for aggregation
404        ExchangeHelper.prepareAggregation(oldExchange, newExchange);
405
406        // check if we are pre complete
407        if (preCompletion) {
408            try {
409                // put the current aggregated size on the exchange so its avail during completion check
410                newExchange.setProperty(Exchange.AGGREGATED_SIZE, size);
411                complete = isPreCompleted(key, oldExchange, newExchange);
412                // make sure to track timeouts if not complete
413                if (complete == null) {
414                    trackTimeout(key, newExchange);
415                }
416                // remove it afterwards
417                newExchange.removeProperty(Exchange.AGGREGATED_SIZE);
418            } catch (Throwable e) {
419                // must catch any exception from aggregation
420                throw new CamelExchangeException("Error occurred during preComplete", newExchange, e);
421            }
422        } else if (isEagerCheckCompletion()) {
423            // put the current aggregated size on the exchange so its avail during completion check
424            newExchange.setProperty(Exchange.AGGREGATED_SIZE, size);
425            complete = isCompleted(key, newExchange);
426            // make sure to track timeouts if not complete
427            if (complete == null) {
428                trackTimeout(key, newExchange);
429            }
430            // remove it afterwards
431            newExchange.removeProperty(Exchange.AGGREGATED_SIZE);
432        }
433
434        if (preCompletion && complete != null) {
435            // need to pre complete the current group before we aggregate
436            doAggregationComplete(complete, list, key, originalExchange, oldExchange);
437            // as we complete the current group eager, we should indicate the new group is not complete
438            complete = null;
439            // and clear old/original exchange as we start on a new group
440            oldExchange = null;
441            originalExchange = null;
442            // and reset the size to 1
443            size = 1;
444            // make sure to track timeout as we just restart the correlation group when we are in pre completion mode
445            trackTimeout(key, newExchange);
446        }
447
448        // aggregate the exchanges
449        try {
450            answer = onAggregation(oldExchange, newExchange);
451        } catch (Throwable e) {
452            // must catch any exception from aggregation
453            throw new CamelExchangeException("Error occurred during aggregation", newExchange, e);
454        }
455        if (answer == null) {
456            throw new CamelExchangeException("AggregationStrategy " + aggregationStrategy + " returned null which is not allowed", newExchange);
457        }
458
459        // special for some repository implementations
460        if (aggregationRepository instanceof RecoverableAggregationRepository) {
461            boolean valid = oldExchange == null || answer.getExchangeId().equals(oldExchange.getExchangeId());
462            if (!valid && aggregateRepositoryWarned.compareAndSet(false, true)) {
463                LOG.warn("AggregationStrategy should return the oldExchange instance instead of the newExchange whenever possible"
464                    + " as otherwise this can lead to unexpected behavior with some RecoverableAggregationRepository implementations");
465            }
466        }
467
468        // update the aggregated size
469        answer.setProperty(Exchange.AGGREGATED_SIZE, size);
470
471        // maybe we should check completion after the aggregation
472        if (!preCompletion && !isEagerCheckCompletion()) {
473            complete = isCompleted(key, answer);
474            // make sure to track timeouts if not complete
475            if (complete == null) {
476                trackTimeout(key, newExchange);
477            }
478        }
479
480        if (complete == null) {
481            // only need to update aggregation repository if we are not complete
482            doAggregationRepositoryAdd(newExchange.getContext(), key, originalExchange, answer);
483        } else {
484            // if we are complete then add the answer to the list
485            doAggregationComplete(complete, list, key, originalExchange, answer);
486        }
487
488        LOG.trace("onAggregation +++  end  +++ with correlation key: {}", key);
489        return list;
490    }
491
    /**
     * Completes the aggregated exchange(s) and adds them to the given list so the caller can
     * submit them (outside any lock).
     * <p/>
     * When the trigger is "consumer" (batch consumer completion) all tracked batch correlation
     * keys are completed; otherwise only the given answer is completed.
     *
     * @param complete the completion trigger type (e.g. "consumer", "size", "predicate", "strategy")
     * @param list the list to add completed exchanges to
     * @param key the correlation key
     * @param originalExchange the exchange currently stored in the repository, or <tt>null</tt>
     * @param answer the aggregated exchange
     */
    protected void doAggregationComplete(String complete, List<Exchange> list, String key, Exchange originalExchange, Exchange answer) {
        if ("consumer".equals(complete)) {
            for (String batchKey : batchConsumerCorrelationKeys) {
                Exchange batchAnswer;
                if (batchKey.equals(key)) {
                    // skip the current aggregated key as we have already aggregated it and have the answer
                    batchAnswer = answer;
                } else {
                    batchAnswer = aggregationRepository.get(camelContext, batchKey);
                }

                if (batchAnswer != null) {
                    batchAnswer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, complete);
                    onCompletion(batchKey, originalExchange, batchAnswer, false);
                    list.add(batchAnswer);
                }
            }
            batchConsumerCorrelationKeys.clear();
            // we have already submitted to completion, so answer should be null
            answer = null;
        } else if (answer != null) {
            // we are complete for this exchange
            answer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, complete);
            // onCompletion may return null (e.g. discarded on timeout)
            answer = onCompletion(key, originalExchange, answer, false);
        }

        if (answer != null) {
            list.add(answer);
        }
    }
522
    /**
     * Stores the in-progress aggregated exchange in the aggregation repository.
     * <p/>
     * When optimistic locking is enabled the add may fail with an
     * {@code OptimisticLockingException}; in that case the strategy is notified via
     * {@link #onOptimisticLockingFailure(Exchange, Exchange)} and the exception is
     * rethrown so the caller can retry.
     */
    protected void doAggregationRepositoryAdd(CamelContext camelContext, String key, Exchange oldExchange, Exchange newExchange) {
        LOG.trace("In progress aggregated oldExchange: {}, newExchange: {} with correlation key: {}", new Object[]{oldExchange, newExchange, key});
        if (optimisticLocking) {
            try {
                ((OptimisticLockingAggregationRepository)aggregationRepository).add(camelContext, key, oldExchange, newExchange);
            } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
                onOptimisticLockingFailure(oldExchange, newExchange);
                throw e;
            }
        } else {
            aggregationRepository.add(camelContext, key, newExchange);
        }
    }
536
537    protected void onOptimisticLockingFailure(Exchange oldExchange, Exchange newExchange) {
538        AggregationStrategy strategy = aggregationStrategy;
539        if (strategy instanceof DelegateAggregationStrategy) {
540            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
541        }
542        if (strategy instanceof OptimisticLockingAwareAggregationStrategy) {
543            LOG.trace("onOptimisticLockFailure with AggregationStrategy: {}, oldExchange: {}, newExchange: {}",
544                      new Object[]{strategy, oldExchange, newExchange});
545            ((OptimisticLockingAwareAggregationStrategy)strategy).onOptimisticLockFailure(oldExchange, newExchange);
546        }
547    }
548
549    /**
550     * Tests whether the given exchanges is pre-complete or not
551     *
552     * @param key      the correlation key
553     * @param oldExchange   the existing exchange
554     * @param newExchange the incoming exchange
555     * @return <tt>null</tt> if not pre-completed, otherwise a String with the type that triggered the pre-completion
556     */
557    protected String isPreCompleted(String key, Exchange oldExchange, Exchange newExchange) {
558        boolean complete = false;
559        AggregationStrategy strategy = aggregationStrategy;
560        if (strategy instanceof DelegateAggregationStrategy) {
561            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
562        }
563        if (strategy instanceof PreCompletionAwareAggregationStrategy) {
564            complete = ((PreCompletionAwareAggregationStrategy) strategy).preComplete(oldExchange, newExchange);
565        }
566        return complete ? "strategy" : null;
567    }
568
569    /**
570     * Tests whether the given exchange is complete or not
571     *
572     * @param key      the correlation key
573     * @param exchange the incoming exchange
574     * @return <tt>null</tt> if not completed, otherwise a String with the type that triggered the completion
575     */
576    protected String isCompleted(String key, Exchange exchange) {
577        // batch consumer completion must always run first
578        if (isCompletionFromBatchConsumer()) {
579            batchConsumerCorrelationKeys.add(key);
580            batchConsumerCounter.incrementAndGet();
581            int size = exchange.getProperty(Exchange.BATCH_SIZE, 0, Integer.class);
582            if (size > 0 && batchConsumerCounter.intValue() >= size) {
583                // batch consumer is complete then reset the counter
584                batchConsumerCounter.set(0);
585                return "consumer";
586            }
587        }
588
589        if (exchange.getProperty(Exchange.AGGREGATION_COMPLETE_CURRENT_GROUP, false, boolean.class)) {
590            return "strategy";
591        }
592
593        if (getCompletionPredicate() != null) {
594            boolean answer = getCompletionPredicate().matches(exchange);
595            if (answer) {
596                return "predicate";
597            }
598        }
599
600        boolean sizeChecked = false;
601        if (getCompletionSizeExpression() != null) {
602            Integer value = getCompletionSizeExpression().evaluate(exchange, Integer.class);
603            if (value != null && value > 0) {
604                // mark as already checked size as expression takes precedence over static configured
605                sizeChecked = true;
606                int size = exchange.getProperty(Exchange.AGGREGATED_SIZE, 1, Integer.class);
607                if (size >= value) {
608                    return "size";
609                }
610            }
611        }
612        if (!sizeChecked && getCompletionSize() > 0) {
613            int size = exchange.getProperty(Exchange.AGGREGATED_SIZE, 1, Integer.class);
614            if (size >= getCompletionSize()) {
615                return "size";
616            }
617        }
618
619        // not complete
620        return null;
621    }
622
623    protected void trackTimeout(String key, Exchange exchange) {
624        // timeout can be either evaluated based on an expression or from a fixed value
625        // expression takes precedence
626        boolean timeoutSet = false;
627        if (getCompletionTimeoutExpression() != null) {
628            Long value = getCompletionTimeoutExpression().evaluate(exchange, Long.class);
629            if (value != null && value > 0) {
630                if (LOG.isTraceEnabled()) {
631                    LOG.trace("Updating correlation key {} to timeout after {} ms. as exchange received: {}",
632                            new Object[]{key, value, exchange});
633                }
634                addExchangeToTimeoutMap(key, exchange, value);
635                timeoutSet = true;
636            }
637        }
638        if (!timeoutSet && getCompletionTimeout() > 0) {
639            // timeout is used so use the timeout map to keep an eye on this
640            if (LOG.isTraceEnabled()) {
641                LOG.trace("Updating correlation key {} to timeout after {} ms. as exchange received: {}",
642                        new Object[]{key, getCompletionTimeout(), exchange});
643            }
644            addExchangeToTimeoutMap(key, exchange, getCompletionTimeout());
645        }
646    }
647
    /**
     * Combines the old and new exchange using the configured {@link AggregationStrategy}.
     */
    protected Exchange onAggregation(Exchange oldExchange, Exchange newExchange) {
        return aggregationStrategy.aggregate(oldExchange, newExchange);
    }
651
652    protected boolean onPreCompletionAggregation(Exchange oldExchange, Exchange newExchange) {
653        AggregationStrategy strategy = aggregationStrategy;
654        if (strategy instanceof DelegateAggregationStrategy) {
655            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
656        }
657        if (strategy instanceof PreCompletionAwareAggregationStrategy) {
658            return ((PreCompletionAwareAggregationStrategy) strategy).preComplete(oldExchange, newExchange);
659        }
660        return false;
661    }
662
    /**
     * Invoked when an aggregated exchange is complete.
     * <p/>
     * Removes the group from the aggregation repository and the timeout map, closes the
     * correlation key when configured to do so, and invokes timeout-aware strategies when
     * completion was triggered by the timeout checker.
     *
     * @param key the correlation key
     * @param original the exchange previously stored in the repository, or <tt>null</tt> if this
     *                 group completed with only a single exchange
     * @param aggregated the aggregated exchange
     * @param fromTimeout whether completion was triggered by the timeout checker
     * @return the exchange to send downstream, or <tt>null</tt> if it was discarded
     *         (completed by timeout with discardOnCompletionTimeout enabled)
     */
    protected Exchange onCompletion(final String key, final Exchange original, final Exchange aggregated, boolean fromTimeout) {
        // store the correlation key as property before we remove so the repository has that information
        if (original != null) {
            original.setProperty(Exchange.AGGREGATED_CORRELATION_KEY, key);
        }
        aggregated.setProperty(Exchange.AGGREGATED_CORRELATION_KEY, key);

        // only remove if we have previous added (as we could potentially complete with only 1 exchange)
        // (if we have previous added then we have that as the original exchange)
        if (original != null) {
            // remove from repository as its completed, we do this first as to trigger any OptimisticLockingException's
            aggregationRepository.remove(aggregated.getContext(), key, original);
        }

        if (!fromTimeout && timeoutMap != null) {
            // cleanup timeout map if it was a incoming exchange which triggered the timeout (and not the timeout checker)
            LOG.trace("Removing correlation key {} from timeout", key);
            timeoutMap.remove(key);
        }

        // this key has been closed so add it to the closed map
        if (closedCorrelationKeys != null) {
            closedCorrelationKeys.put(key, key);
        }

        if (fromTimeout) {
            // invoke timeout if its timeout aware aggregation strategy,
            // to allow any custom processing before discarding the exchange
            AggregationStrategy strategy = aggregationStrategy;
            if (strategy instanceof DelegateAggregationStrategy) {
                strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
            }
            if (strategy instanceof TimeoutAwareAggregationStrategy) {
                // index/total are unknown at this point, hence -1
                long timeout = getCompletionTimeout() > 0 ? getCompletionTimeout() : -1;
                ((TimeoutAwareAggregationStrategy) strategy).timeout(aggregated, -1, -1, timeout);
            }
        }

        Exchange answer;
        if (fromTimeout && isDiscardOnCompletionTimeout()) {
            // discard due timeout
            LOG.debug("Aggregation for correlation key {} discarding aggregated exchange: {}", key, aggregated);
            // must confirm the discarded exchange
            aggregationRepository.confirm(aggregated.getContext(), aggregated.getExchangeId());
            // and remove redelivery state as well
            redeliveryState.remove(aggregated.getExchangeId());
            // the completion was from timeout and we should just discard it
            answer = null;
        } else {
            // the aggregated exchange should be published (sent out)
            answer = aggregated;
        }

        return answer;
    }
718
    /**
     * Submits the completed aggregated exchange to be processed asynchronously by the
     * downstream processor.
     * <p/>
     * The exchange id is registered as in-progress <b>before</b> the task is submitted, and an
     * {@link AggregateOnCompletion} synchronization is added inside the task so the in-progress
     * bookkeeping (and repository confirm) is performed when processing finishes.
     *
     * @param key      the correlation key the exchange was aggregated under
     * @param exchange the aggregated exchange to send
     */
    private void onSubmitCompletion(final String key, final Exchange exchange) {
        LOG.debug("Aggregation complete for correlation key {} sending aggregated exchange: {}", key, exchange);

        // add this as in progress before we submit the task
        inProgressCompleteExchanges.add(exchange.getExchangeId());

        // invoke the on completion callback
        // (unwrap a delegating strategy to inspect the actual strategy in use)
        AggregationStrategy target = aggregationStrategy;
        if (target instanceof DelegateAggregationStrategy) {
            target = ((DelegateAggregationStrategy) target).getDelegate();
        }
        if (target instanceof CompletionAwareAggregationStrategy) {
            ((CompletionAwareAggregationStrategy) target).onCompletion(exchange);
        }

        // bump the statistics counter that matches what triggered this completion
        if (getStatistics().isStatisticsEnabled()) {
            totalCompleted.incrementAndGet();

            String completedBy = exchange.getProperty(Exchange.AGGREGATED_COMPLETED_BY, String.class);
            if ("interval".equals(completedBy)) {
                completedByInterval.incrementAndGet();
            } else if ("timeout".equals(completedBy)) {
                completedByTimeout.incrementAndGet();
            } else if ("force".equals(completedBy)) {
                completedByForce.incrementAndGet();
            } else if ("consumer".equals(completedBy)) {
                completedByBatchConsumer.incrementAndGet();
            } else if ("predicate".equals(completedBy)) {
                completedByPredicate.incrementAndGet();
            } else if ("size".equals(completedBy)) {
                completedBySize.incrementAndGet();
            } else if ("strategy".equals(completedBy)) {
                completedByStrategy.incrementAndGet();
            }
        }

        // send this exchange
        executorService.submit(new Runnable() {
            public void run() {
                LOG.debug("Processing aggregated exchange: {}", exchange);

                // add on completion task so we remember to update the inProgressCompleteExchanges
                exchange.addOnCompletion(new AggregateOnCompletion(exchange.getExchangeId()));

                try {
                    processor.process(exchange);
                } catch (Throwable e) {
                    exchange.setException(e);
                }

                // log exception if there was a problem
                if (exchange.getException() != null) {
                    // if there was an exception then let the exception handler handle it
                    getExceptionHandler().handleException("Error processing aggregated exchange", exchange, exchange.getException());
                } else {
                    LOG.trace("Processing aggregated exchange: {} complete.", exchange);
                }
            }
        });
    }
779
780    /**
781     * Restores the timeout map with timeout values from the aggregation repository.
782     * <p/>
783     * This is needed in case the aggregator has been stopped and started again (for example a server restart).
784     * Then the existing exchanges from the {@link AggregationRepository} must have their timeout conditions restored.
785     */
786    protected void restoreTimeoutMapFromAggregationRepository() throws Exception {
787        // grab the timeout value for each partly aggregated exchange
788        Set<String> keys = aggregationRepository.getKeys();
789        if (keys == null || keys.isEmpty()) {
790            return;
791        }
792
793        StopWatch watch = new StopWatch();
794        LOG.trace("Starting restoring CompletionTimeout for {} existing exchanges from the aggregation repository...", keys.size());
795
796        for (String key : keys) {
797            Exchange exchange = aggregationRepository.get(camelContext, key);
798            // grab the timeout value
799            long timeout = exchange.hasProperties() ? exchange.getProperty(Exchange.AGGREGATED_TIMEOUT, 0, long.class) : 0;
800            if (timeout > 0) {
801                LOG.trace("Restoring CompletionTimeout for exchangeId: {} with timeout: {} millis.", exchange.getExchangeId(), timeout);
802                addExchangeToTimeoutMap(key, exchange, timeout);
803            }
804        }
805
806        // log duration of this task so end user can see how long it takes to pre-check this upon starting
807        LOG.info("Restored {} CompletionTimeout conditions in the AggregationTimeoutChecker in {}",
808                timeoutMap.size(), TimeUtils.printDuration(watch.stop()));
809    }
810
811    /**
812     * Adds the given exchange to the timeout map, which is used by the timeout checker task to trigger timeouts.
813     *
814     * @param key      the correlation key
815     * @param exchange the exchange
816     * @param timeout  the timeout value in millis
817     */
818    private void addExchangeToTimeoutMap(String key, Exchange exchange, long timeout) {
819        // store the timeout value on the exchange as well, in case we need it later
820        exchange.setProperty(Exchange.AGGREGATED_TIMEOUT, timeout);
821        timeoutMap.put(key, exchange.getExchangeId(), timeout);
822    }
823
824    /**
825     * Current number of closed correlation keys in the memory cache
826     */
827    public int getClosedCorrelationKeysCacheSize() {
828        if (closedCorrelationKeys != null) {
829            return closedCorrelationKeys.size();
830        } else {
831            return 0;
832        }
833    }
834
835    /**
836     * Clear all the closed correlation keys stored in the cache
837     */
838    public void clearClosedCorrelationKeysCache() {
839        if (closedCorrelationKeys != null) {
840            closedCorrelationKeys.clear();
841        }
842    }
843
    /** The statistics for this aggregator. */
    public AggregateProcessorStatistics getStatistics() {
        return statistics;
    }

    /** Current number of completed exchanges which are in progress of being sent out. */
    public int getInProgressCompleteExchanges() {
        return inProgressCompleteExchanges.size();
    }

    // -----------------------------------------------------------------------
    // Configuration getters and setters
    // -----------------------------------------------------------------------

    /** The predicate which, when it matches, triggers completion of the aggregation. */
    public Predicate getCompletionPredicate() {
        return completionPredicate;
    }

    public void setCompletionPredicate(Predicate completionPredicate) {
        this.completionPredicate = completionPredicate;
    }

    /** Whether the completion check is done eagerly (before aggregating the incoming exchange). */
    public boolean isEagerCheckCompletion() {
        return eagerCheckCompletion;
    }

    public void setEagerCheckCompletion(boolean eagerCheckCompletion) {
        this.eagerCheckCompletion = eagerCheckCompletion;
    }

    /** The inactivity timeout in millis which triggers completion; disabled if &lt;= 0. */
    public long getCompletionTimeout() {
        return completionTimeout;
    }

    public void setCompletionTimeout(long completionTimeout) {
        this.completionTimeout = completionTimeout;
    }

    /** An expression used to compute a per-exchange completion timeout. */
    public Expression getCompletionTimeoutExpression() {
        return completionTimeoutExpression;
    }

    public void setCompletionTimeoutExpression(Expression completionTimeoutExpression) {
        this.completionTimeoutExpression = completionTimeoutExpression;
    }

    /** The fixed interval in millis at which completion is triggered; disabled if &lt;= 0. */
    public long getCompletionInterval() {
        return completionInterval;
    }

    public void setCompletionInterval(long completionInterval) {
        this.completionInterval = completionInterval;
    }

    /** The number of aggregated exchanges which triggers completion; disabled if &lt;= 0. */
    public int getCompletionSize() {
        return completionSize;
    }

    public void setCompletionSize(int completionSize) {
        this.completionSize = completionSize;
    }

    /** An expression used to compute a per-exchange completion size. */
    public Expression getCompletionSizeExpression() {
        return completionSizeExpression;
    }

    public void setCompletionSizeExpression(Expression completionSizeExpression) {
        this.completionSizeExpression = completionSizeExpression;
    }

    /** Whether exchanges with an invalid (unresolvable) correlation key are ignored instead of failing. */
    public boolean isIgnoreInvalidCorrelationKeys() {
        return ignoreInvalidCorrelationKeys;
    }

    public void setIgnoreInvalidCorrelationKeys(boolean ignoreInvalidCorrelationKeys) {
        this.ignoreInvalidCorrelationKeys = ignoreInvalidCorrelationKeys;
    }

    /**
     * Capacity of the closed correlation key cache; a positive value uses a bounded LRU cache,
     * zero or negative uses an unbounded map, and <tt>null</tt> disables closing of keys.
     */
    public Integer getCloseCorrelationKeyOnCompletion() {
        return closeCorrelationKeyOnCompletion;
    }

    public void setCloseCorrelationKeyOnCompletion(Integer closeCorrelationKeyOnCompletion) {
        this.closeCorrelationKeyOnCompletion = closeCorrelationKeyOnCompletion;
    }

    /** Whether completion can be triggered by a batch consumer. */
    public boolean isCompletionFromBatchConsumer() {
        return completionFromBatchConsumer;
    }

    public void setCompletionFromBatchConsumer(boolean completionFromBatchConsumer) {
        this.completionFromBatchConsumer = completionFromBatchConsumer;
    }

    /** Whether all pending exchanges should be completed when the aggregator stops. */
    public boolean isCompleteAllOnStop() {
        return completeAllOnStop;
    }

    /** The handler invoked when processing an aggregated exchange fails. */
    public ExceptionHandler getExceptionHandler() {
        return exceptionHandler;
    }

    public void setExceptionHandler(ExceptionHandler exceptionHandler) {
        this.exceptionHandler = exceptionHandler;
    }

    /** Whether aggregated exchanges are processed in parallel. */
    public boolean isParallelProcessing() {
        return parallelProcessing;
    }

    public void setParallelProcessing(boolean parallelProcessing) {
        this.parallelProcessing = parallelProcessing;
    }

    /** Whether optimistic locking is used when accessing the aggregation repository. */
    public boolean isOptimisticLocking() {
        return optimisticLocking;
    }

    public void setOptimisticLocking(boolean optimisticLocking) {
        this.optimisticLocking = optimisticLocking;
    }

    /** The repository where partly aggregated exchanges are stored. */
    public AggregationRepository getAggregationRepository() {
        return aggregationRepository;
    }

    public void setAggregationRepository(AggregationRepository aggregationRepository) {
        this.aggregationRepository = aggregationRepository;
    }

    /** Whether aggregated exchanges completed by timeout are discarded instead of being sent out. */
    public boolean isDiscardOnCompletionTimeout() {
        return discardOnCompletionTimeout;
    }

    public void setDiscardOnCompletionTimeout(boolean discardOnCompletionTimeout) {
        this.discardOnCompletionTimeout = discardOnCompletionTimeout;
    }

    public void setForceCompletionOnStop(boolean forceCompletionOnStop) {
        this.forceCompletionOnStop = forceCompletionOnStop;
    }

    public void setCompleteAllOnStop(boolean completeAllOnStop) {
        this.completeAllOnStop = completeAllOnStop;
    }

    /** The scheduler used by the timeout/interval background checkers. */
    public void setTimeoutCheckerExecutorService(ScheduledExecutorService timeoutCheckerExecutorService) {
        this.timeoutCheckerExecutorService = timeoutCheckerExecutorService;
    }

    public ScheduledExecutorService getTimeoutCheckerExecutorService() {
        return timeoutCheckerExecutorService;
    }

    /** Whether this aggregator owns (and should shut down) the timeout checker executor service. */
    public boolean isShutdownTimeoutCheckerExecutorService() {
        return shutdownTimeoutCheckerExecutorService;
    }

    public void setShutdownTimeoutCheckerExecutorService(boolean shutdownTimeoutCheckerExecutorService) {
        this.shutdownTimeoutCheckerExecutorService = shutdownTimeoutCheckerExecutorService;
    }

    /** The retry policy applied when an optimistic locking conflict occurs. */
    public void setOptimisticLockRetryPolicy(OptimisticLockRetryPolicy optimisticLockRetryPolicy) {
        this.optimisticLockRetryPolicy = optimisticLockRetryPolicy;
    }

    public OptimisticLockRetryPolicy getOptimisticLockRetryPolicy() {
        return optimisticLockRetryPolicy;
    }

    /** The strategy used to aggregate the exchanges. */
    public AggregationStrategy getAggregationStrategy() {
        return aggregationStrategy;
    }

    public void setAggregationStrategy(AggregationStrategy aggregationStrategy) {
        this.aggregationStrategy = aggregationStrategy;
    }

    /** The expression used to compute the correlation key for an exchange. */
    public Expression getCorrelationExpression() {
        return correlationExpression;
    }

    public void setCorrelationExpression(Expression correlationExpression) {
        this.correlationExpression = correlationExpression;
    }

    /** The controller used to externally manage (e.g. force-complete) this aggregator. */
    public AggregateController getAggregateController() {
        return aggregateController;
    }

    public void setAggregateController(AggregateController aggregateController) {
        this.aggregateController = aggregateController;
    }
1031
1032    /**
1033     * On completion task which keeps the booking of the in progress up to date
1034     */
1035    private final class AggregateOnCompletion implements Synchronization {
1036        private final String exchangeId;
1037
1038        private AggregateOnCompletion(String exchangeId) {
1039            // must use the original exchange id as it could potentially change if send over SEDA etc.
1040            this.exchangeId = exchangeId;
1041        }
1042
1043        public void onFailure(Exchange exchange) {
1044            LOG.trace("Aggregated exchange onFailure: {}", exchange);
1045
1046            // must remember to remove in progress when we failed
1047            inProgressCompleteExchanges.remove(exchangeId);
1048            // do not remove redelivery state as we need it when we redeliver again later
1049        }
1050
1051        public void onComplete(Exchange exchange) {
1052            LOG.trace("Aggregated exchange onComplete: {}", exchange);
1053
1054            // only confirm if we processed without a problem
1055            try {
1056                aggregationRepository.confirm(exchange.getContext(), exchangeId);
1057                // and remove redelivery state as well
1058                redeliveryState.remove(exchangeId);
1059            } finally {
1060                // must remember to remove in progress when we are complete
1061                inProgressCompleteExchanges.remove(exchangeId);
1062            }
1063        }
1064
1065        @Override
1066        public String toString() {
1067            return "AggregateOnCompletion";
1068        }
1069    }
1070
1071    /**
1072     * Background task that looks for aggregated exchanges which is triggered by completion timeouts.
1073     */
1074    private final class AggregationTimeoutMap extends DefaultTimeoutMap<String, String> {
1075
1076        private AggregationTimeoutMap(ScheduledExecutorService executor, long requestMapPollTimeMillis) {
1077            // do NOT use locking on the timeout map as this aggregator has its own shared lock we will use instead
1078            super(executor, requestMapPollTimeMillis, optimisticLocking);
1079        }
1080
1081        @Override
1082        public void purge() {
1083            // must acquire the shared aggregation lock to be able to purge
1084            if (!optimisticLocking) {
1085                lock.lock();
1086            }
1087            try {
1088                super.purge();
1089            } finally {
1090                if (!optimisticLocking) {
1091                    lock.unlock();
1092                }
1093            }
1094        }
1095
1096        @Override
1097        public boolean onEviction(String key, String exchangeId) {
1098            log.debug("Completion timeout triggered for correlation key: {}", key);
1099
1100            boolean inProgress = inProgressCompleteExchanges.contains(exchangeId);
1101            if (inProgress) {
1102                LOG.trace("Aggregated exchange with id: {} is already in progress.", exchangeId);
1103                return true;
1104            }
1105
1106            // get the aggregated exchange
1107            boolean evictionStolen = false;
1108            Exchange answer = aggregationRepository.get(camelContext, key);
1109            if (answer == null) {
1110                evictionStolen = true;
1111            } else {
1112                // indicate it was completed by timeout
1113                answer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "timeout");
1114                try {
1115                    answer = onCompletion(key, answer, answer, true);
1116                    if (answer != null) {
1117                        onSubmitCompletion(key, answer);
1118                    }
1119                } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
1120                    evictionStolen = true;
1121                }
1122            }
1123
1124            if (optimisticLocking && evictionStolen) {
1125                LOG.debug("Another Camel instance has already successfully correlated or processed this timeout eviction "
1126                          + "for exchange with id: {} and correlation id: {}", exchangeId, key);
1127            }
1128            return true;
1129        }
1130    }
1131
1132    /**
1133     * Background task that triggers completion based on interval.
1134     */
1135    private final class AggregationIntervalTask implements Runnable {
1136
1137        public void run() {
1138            // only run if CamelContext has been fully started
1139            if (!camelContext.getStatus().isStarted()) {
1140                LOG.trace("Completion interval task cannot start due CamelContext({}) has not been started yet", camelContext.getName());
1141                return;
1142            }
1143
1144            LOG.trace("Starting completion interval task");
1145
1146            // trigger completion for all in the repository
1147            Set<String> keys = aggregationRepository.getKeys();
1148
1149            if (keys != null && !keys.isEmpty()) {
1150                // must acquire the shared aggregation lock to be able to trigger interval completion
1151                if (!optimisticLocking) {
1152                    lock.lock();
1153                }
1154                try {
1155                    for (String key : keys) {
1156                        boolean stolenInterval = false;
1157                        Exchange exchange = aggregationRepository.get(camelContext, key);
1158                        if (exchange == null) {
1159                            stolenInterval = true;
1160                        } else {
1161                            LOG.trace("Completion interval triggered for correlation key: {}", key);
1162                            // indicate it was completed by interval
1163                            exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "interval");
1164                            try {
1165                                Exchange answer = onCompletion(key, exchange, exchange, false);
1166                                if (answer != null) {
1167                                    onSubmitCompletion(key, answer);
1168                                }
1169                            } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
1170                                stolenInterval = true;
1171                            }
1172                        }
1173                        if (optimisticLocking && stolenInterval) {
1174                            LOG.debug("Another Camel instance has already processed this interval aggregation for exchange with correlation id: {}", key);
1175                        }
1176                    }
1177                } finally {
1178                    if (!optimisticLocking) {
1179                        lock.unlock();
1180                    }
1181                }
1182            }
1183
1184            LOG.trace("Completion interval task complete");
1185        }
1186    }
1187
1188    /**
1189     * Background task that looks for aggregated exchanges to recover.
1190     */
1191    private final class RecoverTask implements Runnable {
1192        private final RecoverableAggregationRepository recoverable;
1193
1194        private RecoverTask(RecoverableAggregationRepository recoverable) {
1195            this.recoverable = recoverable;
1196        }
1197
1198        public void run() {
1199            // only run if CamelContext has been fully started
1200            if (!camelContext.getStatus().isStarted()) {
1201                LOG.trace("Recover check cannot start due CamelContext({}) has not been started yet", camelContext.getName());
1202                return;
1203            }
1204
1205            LOG.trace("Starting recover check");
1206
1207            // copy the current in progress before doing scan
1208            final Set<String> copyOfInProgress = new LinkedHashSet<String>(inProgressCompleteExchanges);
1209
1210            Set<String> exchangeIds = recoverable.scan(camelContext);
1211            for (String exchangeId : exchangeIds) {
1212
1213                // we may shutdown while doing recovery
1214                if (!isRunAllowed()) {
1215                    LOG.info("We are shutting down so stop recovering");
1216                    return;
1217                }
1218
1219                // consider in progress if it was in progress before we did the scan, or currently after we did the scan
1220                // its safer to consider it in progress than risk duplicates due both in progress + recovered
1221                boolean inProgress = copyOfInProgress.contains(exchangeId) || inProgressCompleteExchanges.contains(exchangeId);
1222                if (inProgress) {
1223                    LOG.trace("Aggregated exchange with id: {} is already in progress.", exchangeId);
1224                } else {
1225                    LOG.debug("Loading aggregated exchange with id: {} to be recovered.", exchangeId);
1226                    Exchange exchange = recoverable.recover(camelContext, exchangeId);
1227                    if (exchange != null) {
1228                        // get the correlation key
1229                        String key = exchange.getProperty(Exchange.AGGREGATED_CORRELATION_KEY, String.class);
1230                        // and mark it as redelivered
1231                        exchange.getIn().setHeader(Exchange.REDELIVERED, Boolean.TRUE);
1232
1233                        // get the current redelivery data
1234                        RedeliveryData data = redeliveryState.get(exchange.getExchangeId());
1235
1236                        // if we are exhausted, then move to dead letter channel
1237                        if (data != null && recoverable.getMaximumRedeliveries() > 0 && data.redeliveryCounter >= recoverable.getMaximumRedeliveries()) {
1238                            LOG.warn("The recovered exchange is exhausted after " + recoverable.getMaximumRedeliveries()
1239                                    + " attempts, will now be moved to dead letter channel: " + recoverable.getDeadLetterUri());
1240
1241                            // send to DLC
1242                            try {
1243                                // set redelivery counter
1244                                exchange.getIn().setHeader(Exchange.REDELIVERY_COUNTER, data.redeliveryCounter);
1245                                exchange.getIn().setHeader(Exchange.REDELIVERY_EXHAUSTED, Boolean.TRUE);
1246                                deadLetterProducerTemplate.send(recoverable.getDeadLetterUri(), exchange);
1247                            } catch (Throwable e) {
1248                                exchange.setException(e);
1249                            }
1250
1251                            // handle if failed
1252                            if (exchange.getException() != null) {
1253                                getExceptionHandler().handleException("Failed to move recovered Exchange to dead letter channel: " + recoverable.getDeadLetterUri(), exchange.getException());
1254                            } else {
1255                                // it was ok, so confirm after it has been moved to dead letter channel, so we wont recover it again
1256                                recoverable.confirm(camelContext, exchangeId);
1257                            }
1258                        } else {
1259                            // update current redelivery state
1260                            if (data == null) {
1261                                // create new data
1262                                data = new RedeliveryData();
1263                                redeliveryState.put(exchange.getExchangeId(), data);
1264                            }
1265                            data.redeliveryCounter++;
1266
1267                            // set redelivery counter
1268                            exchange.getIn().setHeader(Exchange.REDELIVERY_COUNTER, data.redeliveryCounter);
1269                            if (recoverable.getMaximumRedeliveries() > 0) {
1270                                exchange.getIn().setHeader(Exchange.REDELIVERY_MAX_COUNTER, recoverable.getMaximumRedeliveries());
1271                            }
1272
1273                            LOG.debug("Delivery attempt: {} to recover aggregated exchange with id: {}", data.redeliveryCounter, exchangeId);
1274
1275                            // not exhaust so resubmit the recovered exchange
1276                            onSubmitCompletion(key, exchange);
1277                        }
1278                    }
1279                }
1280            }
1281
1282            LOG.trace("Recover check complete");
1283        }
1284    }
1285
1286    @Override
1287    protected void doStart() throws Exception {
1288        AggregationStrategy strategy = aggregationStrategy;
1289        if (strategy instanceof DelegateAggregationStrategy) {
1290            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
1291        }
1292        if (strategy instanceof CamelContextAware) {
1293            ((CamelContextAware) strategy).setCamelContext(camelContext);
1294        }
1295        if (strategy instanceof PreCompletionAwareAggregationStrategy) {
1296            preCompletion = true;
1297            LOG.info("PreCompletionAwareAggregationStrategy detected. Aggregator {} is in pre-completion mode.", getId());
1298        }
1299
1300        if (!preCompletion) {
1301            // if not in pre completion mode then check we configured the completion required
1302            if (getCompletionTimeout() <= 0 && getCompletionInterval() <= 0 && getCompletionSize() <= 0 && getCompletionPredicate() == null
1303                    && !isCompletionFromBatchConsumer() && getCompletionTimeoutExpression() == null
1304                    && getCompletionSizeExpression() == null) {
1305                throw new IllegalStateException("At least one of the completions options"
1306                        + " [completionTimeout, completionInterval, completionSize, completionPredicate, completionFromBatchConsumer] must be set");
1307            }
1308        }
1309
1310        if (getCloseCorrelationKeyOnCompletion() != null) {
1311            if (getCloseCorrelationKeyOnCompletion() > 0) {
1312                LOG.info("Using ClosedCorrelationKeys with a LRUCache with a capacity of " + getCloseCorrelationKeyOnCompletion());
1313                closedCorrelationKeys = new LRUCache<String, String>(getCloseCorrelationKeyOnCompletion());
1314            } else {
1315                LOG.info("Using ClosedCorrelationKeys with unbounded capacity");
1316                closedCorrelationKeys = new ConcurrentHashMap<String, String>();
1317            }
1318        }
1319
1320        if (aggregationRepository == null) {
1321            aggregationRepository = new MemoryAggregationRepository(optimisticLocking);
1322            LOG.info("Defaulting to MemoryAggregationRepository");
1323        }
1324
1325        if (optimisticLocking) {
1326            if (!(aggregationRepository instanceof OptimisticLockingAggregationRepository)) {
1327                throw new IllegalArgumentException("Optimistic locking cannot be enabled without using an AggregationRepository that implements OptimisticLockingAggregationRepository");
1328            }
1329            LOG.info("Optimistic locking is enabled");
1330        }
1331
1332        ServiceHelper.startServices(aggregationStrategy, processor, aggregationRepository);
1333
1334        // should we use recover checker
1335        if (aggregationRepository instanceof RecoverableAggregationRepository) {
1336            RecoverableAggregationRepository recoverable = (RecoverableAggregationRepository) aggregationRepository;
1337            if (recoverable.isUseRecovery()) {
1338                long interval = recoverable.getRecoveryIntervalInMillis();
1339                if (interval <= 0) {
1340                    throw new IllegalArgumentException("AggregationRepository has recovery enabled and the RecoveryInterval option must be a positive number, was: " + interval);
1341                }
1342
1343                // create a background recover thread to check every interval
1344                recoverService = camelContext.getExecutorServiceManager().newScheduledThreadPool(this, "AggregateRecoverChecker", 1);
1345                Runnable recoverTask = new RecoverTask(recoverable);
1346                LOG.info("Using RecoverableAggregationRepository by scheduling recover checker to run every " + interval + " millis.");
1347                // use fixed delay so there is X interval between each run
1348                recoverService.scheduleWithFixedDelay(recoverTask, 1000L, interval, TimeUnit.MILLISECONDS);
1349
1350                if (recoverable.getDeadLetterUri() != null) {
1351                    int max = recoverable.getMaximumRedeliveries();
1352                    if (max <= 0) {
1353                        throw new IllegalArgumentException("Option maximumRedeliveries must be a positive number, was: " + max);
1354                    }
1355                    LOG.info("After " + max + " failed redelivery attempts Exchanges will be moved to deadLetterUri: " + recoverable.getDeadLetterUri());
1356
1357                    // dead letter uri must be a valid endpoint
1358                    Endpoint endpoint = camelContext.getEndpoint(recoverable.getDeadLetterUri());
1359                    if (endpoint == null) {
1360                        throw new NoSuchEndpointException(recoverable.getDeadLetterUri());
1361                    }
1362                    deadLetterProducerTemplate = camelContext.createProducerTemplate();
1363                }
1364            }
1365        }
1366
1367        if (getCompletionInterval() > 0 && getCompletionTimeout() > 0) {
1368            throw new IllegalArgumentException("Only one of completionInterval or completionTimeout can be used, not both.");
1369        }
1370        if (getCompletionInterval() > 0) {
1371            LOG.info("Using CompletionInterval to run every " + getCompletionInterval() + " millis.");
1372            if (getTimeoutCheckerExecutorService() == null) {
1373                setTimeoutCheckerExecutorService(camelContext.getExecutorServiceManager().newScheduledThreadPool(this, AGGREGATE_TIMEOUT_CHECKER, 1));
1374                shutdownTimeoutCheckerExecutorService = true;
1375            }
1376            // trigger completion based on interval
1377            getTimeoutCheckerExecutorService().scheduleAtFixedRate(new AggregationIntervalTask(), getCompletionInterval(), getCompletionInterval(), TimeUnit.MILLISECONDS);
1378        }
1379
1380        // start timeout service if its in use
1381        if (getCompletionTimeout() > 0 || getCompletionTimeoutExpression() != null) {
1382            LOG.info("Using CompletionTimeout to trigger after " + getCompletionTimeout() + " millis of inactivity.");
1383            if (getTimeoutCheckerExecutorService() == null) {
1384                setTimeoutCheckerExecutorService(camelContext.getExecutorServiceManager().newScheduledThreadPool(this, AGGREGATE_TIMEOUT_CHECKER, 1));
1385                shutdownTimeoutCheckerExecutorService = true;
1386            }
1387            // check for timed out aggregated messages once every second
1388            timeoutMap = new AggregationTimeoutMap(getTimeoutCheckerExecutorService(), 1000L);
1389            // fill in existing timeout values from the aggregation repository, for example if a restart occurred, then we
1390            // need to re-establish the timeout map so timeout can trigger
1391            restoreTimeoutMapFromAggregationRepository();
1392            ServiceHelper.startService(timeoutMap);
1393        }
1394
1395        if (aggregateController == null) {
1396            aggregateController = new DefaultAggregateController();
1397        }
1398        aggregateController.onStart(this);
1399    }
1400
    @Override
    protected void doStop() throws Exception {
        // note: we cannot do doForceCompletionOnStop from this doStop method
        // as this is handled in the prepareShutdown method which is also invoked when stopping a route
        // and is better suited for preparing to shutdown than this doStop method is

        // notify the controller that this processor is stopping
        if (aggregateController != null) {
            aggregateController.onStop(this);
        }

        // shut down the background recovery checker thread pool, if one was created
        if (recoverService != null) {
            camelContext.getExecutorServiceManager().shutdown(recoverService);
        }
        // stop the timeout map, the wrapped processor and the dead letter producer template
        ServiceHelper.stopServices(timeoutMap, processor, deadLetterProducerTemplate);

        if (closedCorrelationKeys != null) {
            // it may be a service so stop it as well
            ServiceHelper.stopService(closedCorrelationKeys);
            closedCorrelationKeys.clear();
        }
        // clear transient correlation/redelivery state so a restart begins fresh
        batchConsumerCorrelationKeys.clear();
        redeliveryState.clear();
    }
1424
1425    @Override
1426    public void prepareShutdown(boolean suspendOnly, boolean forced) {
1427        // we are shutting down, so force completion if this option was enabled
1428        // but only do this when forced=false, as that is when we have chance to
1429        // send out new messages to be routed by Camel. When forced=true, then
1430        // we have to shutdown in a hurry
1431        if (!forced && forceCompletionOnStop) {
1432            doForceCompletionOnStop();
1433        }
1434    }
1435
    @Override
    public boolean deferShutdown(ShutdownRunningTask shutdownRunningTask) {
        // not in use; always allow shutdown to proceed without deferral
        return true;
    }
1441
1442    @Override
1443    public int getPendingExchangesSize() {
1444        if (completeAllOnStop) {
1445            // we want to regard all pending exchanges in the repo as inflight
1446            Set<String> keys = getAggregationRepository().getKeys();
1447            return keys != null ? keys.size() : 0;
1448        } else {
1449            return 0;
1450        }
1451    }
1452
1453    private void doForceCompletionOnStop() {
1454        int expected = forceCompletionOfAllGroups();
1455
1456        StopWatch watch = new StopWatch();
1457        while (inProgressCompleteExchanges.size() > 0) {
1458            LOG.trace("Waiting for {} inflight exchanges to complete", getInProgressCompleteExchanges());
1459            try {
1460                Thread.sleep(100);
1461            } catch (InterruptedException e) {
1462                // break out as we got interrupted such as the JVM terminating
1463                LOG.warn("Interrupted while waiting for {} inflight exchanges to complete.", getInProgressCompleteExchanges());
1464                break;
1465            }
1466        }
1467
1468        if (expected > 0) {
1469            LOG.info("Forcing completion of all groups with {} exchanges completed in {}", expected, TimeUtils.printDuration(watch.stop()));
1470        }
1471    }
1472
    @Override
    protected void doShutdown() throws Exception {
        // shutdown aggregation repository and the strategy
        ServiceHelper.stopAndShutdownServices(aggregationRepository, aggregationStrategy);

        // cleanup when shutting down
        inProgressCompleteExchanges.clear();

        // only shut down the thread pools this processor created itself
        // (the shutdownXxx flags are set in doStart when the pools are created here)
        if (shutdownExecutorService) {
            camelContext.getExecutorServiceManager().shutdownNow(executorService);
        }
        if (shutdownTimeoutCheckerExecutorService) {
            camelContext.getExecutorServiceManager().shutdownNow(timeoutCheckerExecutorService);
            // NOTE(review): only the timeout checker pool reference is nulled here
            // (so doStart can recreate it); executorService is left assigned — confirm
            // whether that asymmetry is intentional
            timeoutCheckerExecutorService = null;
        }

        super.doShutdown();
    }
1491
1492    public int forceCompletionOfGroup(String key) {
1493        // must acquire the shared aggregation lock to be able to trigger force completion
1494        int total = 0;
1495
1496        if (!optimisticLocking) {
1497            lock.lock();
1498        }
1499        try {
1500            Exchange exchange = aggregationRepository.get(camelContext, key);
1501            if (exchange != null) {
1502                total = 1;
1503                LOG.trace("Force completion triggered for correlation key: {}", key);
1504                // indicate it was completed by a force completion request
1505                exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "force");
1506                Exchange answer = onCompletion(key, exchange, exchange, false);
1507                if (answer != null) {
1508                    onSubmitCompletion(key, answer);
1509                }
1510            }
1511        } finally {
1512            if (!optimisticLocking) {
1513                lock.unlock(); 
1514            }
1515        }
1516        LOG.trace("Completed force completion of group {}", key);
1517
1518        if (total > 0) {
1519            LOG.debug("Forcing completion of group {} with {} exchanges", key, total);
1520        }
1521        return total;
1522    }
1523
1524    public int forceCompletionOfAllGroups() {
1525
1526        // only run if CamelContext has been fully started or is stopping
1527        boolean allow = camelContext.getStatus().isStarted() || camelContext.getStatus().isStopping();
1528        if (!allow) {
1529            LOG.warn("Cannot start force completion of all groups because CamelContext({}) has not been started", camelContext.getName());
1530            return 0;
1531        }
1532
1533        LOG.trace("Starting force completion of all groups task");
1534
1535        // trigger completion for all in the repository
1536        Set<String> keys = aggregationRepository.getKeys();
1537
1538        int total = 0;
1539        if (keys != null && !keys.isEmpty()) {
1540            // must acquire the shared aggregation lock to be able to trigger force completion
1541            if (!optimisticLocking) {
1542                lock.lock(); 
1543            }
1544            total = keys.size();
1545            try {
1546                for (String key : keys) {
1547                    Exchange exchange = aggregationRepository.get(camelContext, key);
1548                    if (exchange != null) {
1549                        LOG.trace("Force completion triggered for correlation key: {}", key);
1550                        // indicate it was completed by a force completion request
1551                        exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "force");
1552                        Exchange answer = onCompletion(key, exchange, exchange, false);
1553                        if (answer != null) {
1554                            onSubmitCompletion(key, answer);
1555                        }
1556                    }
1557                }
1558            } finally {
1559                if (!optimisticLocking) {
1560                    lock.unlock();
1561                }
1562            }
1563        }
1564        LOG.trace("Completed force completion of all groups task");
1565
1566        if (total > 0) {
1567            LOG.debug("Forcing completion of all groups with {} exchanges", total);
1568        }
1569        return total;
1570    }
1571
1572}