001/**
002 * Licensed to the Apache Software Foundation (ASF) under one or more
003 * contributor license agreements.  See the NOTICE file distributed with
004 * this work for additional information regarding copyright ownership.
005 * The ASF licenses this file to You under the Apache License, Version 2.0
006 * (the "License"); you may not use this file except in compliance with
007 * the License.  You may obtain a copy of the License at
008 *
009 *      http://www.apache.org/licenses/LICENSE-2.0
010 *
011 * Unless required by applicable law or agreed to in writing, software
012 * distributed under the License is distributed on an "AS IS" BASIS,
013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
014 * See the License for the specific language governing permissions and
015 * limitations under the License.
016 */
017package org.apache.camel.processor;
018
019import java.io.Closeable;
020import java.util.ArrayList;
021import java.util.Collection;
022import java.util.Iterator;
023import java.util.List;
024import java.util.Map;
025import java.util.concurrent.Callable;
026import java.util.concurrent.CompletionService;
027import java.util.concurrent.ConcurrentHashMap;
028import java.util.concurrent.ConcurrentMap;
029import java.util.concurrent.CountDownLatch;
030import java.util.concurrent.ExecutionException;
031import java.util.concurrent.ExecutorCompletionService;
032import java.util.concurrent.ExecutorService;
033import java.util.concurrent.Future;
034import java.util.concurrent.TimeUnit;
035import java.util.concurrent.atomic.AtomicBoolean;
036import java.util.concurrent.atomic.AtomicInteger;
037
038import org.apache.camel.AsyncCallback;
039import org.apache.camel.AsyncProcessor;
040import org.apache.camel.CamelContext;
041import org.apache.camel.CamelContextAware;
042import org.apache.camel.CamelExchangeException;
043import org.apache.camel.Endpoint;
044import org.apache.camel.ErrorHandlerFactory;
045import org.apache.camel.Exchange;
046import org.apache.camel.Navigate;
047import org.apache.camel.Processor;
048import org.apache.camel.Producer;
049import org.apache.camel.StreamCache;
050import org.apache.camel.Traceable;
051import org.apache.camel.processor.aggregate.AggregationStrategy;
052import org.apache.camel.processor.aggregate.CompletionAwareAggregationStrategy;
053import org.apache.camel.processor.aggregate.DelegateAggregationStrategy;
054import org.apache.camel.processor.aggregate.TimeoutAwareAggregationStrategy;
055import org.apache.camel.spi.IdAware;
056import org.apache.camel.spi.RouteContext;
057import org.apache.camel.spi.TracedRouteNodes;
058import org.apache.camel.spi.UnitOfWork;
059import org.apache.camel.support.ServiceSupport;
060import org.apache.camel.util.AsyncProcessorConverterHelper;
061import org.apache.camel.util.AsyncProcessorHelper;
062import org.apache.camel.util.CastUtils;
063import org.apache.camel.util.EventHelper;
064import org.apache.camel.util.ExchangeHelper;
065import org.apache.camel.util.IOHelper;
066import org.apache.camel.util.KeyValueHolder;
067import org.apache.camel.util.ObjectHelper;
068import org.apache.camel.util.ServiceHelper;
069import org.apache.camel.util.StopWatch;
070import org.apache.camel.util.concurrent.AtomicException;
071import org.apache.camel.util.concurrent.AtomicExchange;
072import org.apache.camel.util.concurrent.SubmitOrderedCompletionService;
073import org.slf4j.Logger;
074import org.slf4j.LoggerFactory;
075
076import static org.apache.camel.util.ObjectHelper.notNull;
077
078
079/**
080 * Implements the Multicast pattern to send a message exchange to a number of
081 * endpoints, each endpoint receiving a copy of the message exchange.
082 *
083 * @version
084 * @see Pipeline
085 */
086public class MulticastProcessor extends ServiceSupport implements AsyncProcessor, Navigate<Processor>, Traceable, IdAware {
087
088    private static final Logger LOG = LoggerFactory.getLogger(MulticastProcessor.class);
089
090    /**
091     * Class that represent each step in the multicast route to do
092     */
093    static final class DefaultProcessorExchangePair implements ProcessorExchangePair {
094        private final int index;
095        private final Processor processor;
096        private final Processor prepared;
097        private final Exchange exchange;
098
099        private DefaultProcessorExchangePair(int index, Processor processor, Processor prepared, Exchange exchange) {
100            this.index = index;
101            this.processor = processor;
102            this.prepared = prepared;
103            this.exchange = exchange;
104        }
105
106        public int getIndex() {
107            return index;
108        }
109
110        public Exchange getExchange() {
111            return exchange;
112        }
113
114        public Producer getProducer() {
115            if (processor instanceof Producer) {
116                return (Producer) processor;
117            }
118            return null;
119        }
120
121        public Processor getProcessor() {
122            return prepared;
123        }
124
125        public void begin() {
126            // noop
127        }
128
129        public void done() {
130            // noop
131        }
132
133    }
134
135    /**
 * Class that represents prepared fine grained error handlers when processing multicast/split exchanges
137     * <p/>
138     * See the <tt>createProcessorExchangePair</tt> and <tt>createErrorHandler</tt> methods.
139     */
140    static final class PreparedErrorHandler extends KeyValueHolder<RouteContext, Processor> {
141
142        PreparedErrorHandler(RouteContext key, Processor value) {
143            super(key, value);
144        }
145
146    }
147
    // optional processor invoked to prepare each exchange copy before it is sent to a destination
    protected final Processor onPrepare;
    private final CamelContext camelContext;
    // id of this processor (IdAware)
    private String id;
    // the destinations to multicast to
    private Collection<Processor> processors;
    // strategy for aggregating replies; may be null
    private final AggregationStrategy aggregationStrategy;
    // whether destinations are processed concurrently (forced true when an executor service is provided)
    private final boolean parallelProcessing;
    // when parallel: aggregate replies as they complete instead of in submit order
    private final boolean streaming;
    // whether the aggregate work itself may run concurrently
    private final boolean parallelAggregate;
    // whether an exception from the aggregation strategy should stop processing immediately
    private final boolean stopOnAggregateException;
    // whether to stop multicasting further destinations when one fails
    private final boolean stopOnException;
    // executor used for the parallel worker tasks
    private final ExecutorService executorService;
    // whether this processor owns (and must shut down) the executor service
    private final boolean shutdownExecutorService;
    // executor used for the on-the-fly aggregation task when running in parallel
    private ExecutorService aggregateExecutorService;
    // total timeout in millis for parallel processing; 0 or negative disables the timeout
    private final long timeout;
    // cache of prepared fine grained error handlers, keyed by route context + processor
    private final ConcurrentMap<PreparedErrorHandler, Processor> errorHandlers = new ConcurrentHashMap<PreparedErrorHandler, Processor>();
    // whether sub exchanges share the unit of work of the original exchange
    private final boolean shareUnitOfWork;
164
    /**
     * Creates a sequential multicast with no aggregation strategy.
     */
    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors) {
        this(camelContext, processors, null);
    }
168
    /**
     * Creates a sequential multicast with the given aggregation strategy and all other
     * options at their defaults (no executor, no timeout, no stop-on-exception).
     */
    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy) {
        this(camelContext, processors, aggregationStrategy, false, null, false, false, false, 0, null, false, false);
    }
172
    /**
     * @deprecated use the constructor that also accepts the <tt>parallelAggregate</tt> flag;
     *             this variant delegates with <tt>parallelAggregate</tt> set to <tt>false</tt>
     */
    @Deprecated
    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy,
                              boolean parallelProcessing, ExecutorService executorService, boolean shutdownExecutorService,
                              boolean streaming, boolean stopOnException, long timeout, Processor onPrepare, boolean shareUnitOfWork) {
        this(camelContext, processors, aggregationStrategy, parallelProcessing, executorService, shutdownExecutorService,
                streaming, stopOnException, timeout, onPrepare, shareUnitOfWork, false);
    }
180
181    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy, boolean parallelProcessing,
182                              ExecutorService executorService, boolean shutdownExecutorService, boolean streaming, boolean stopOnException, long timeout, Processor onPrepare,
183                              boolean shareUnitOfWork, boolean parallelAggregate) {
184        this(camelContext, processors, aggregationStrategy, parallelProcessing, executorService, shutdownExecutorService, streaming, stopOnException, timeout, onPrepare,
185             shareUnitOfWork, false, false);
186    }
187    
    /**
     * Creates a multicast processor.
     *
     * @param camelContext             the camel context (must not be null)
     * @param processors               the destinations to multicast to
     * @param aggregationStrategy      strategy for aggregating replies; may be null
     * @param parallelProcessing       process destinations concurrently; forced to <tt>true</tt>
     *                                 when an executor service is provided
     * @param executorService          executor for parallel worker tasks; may be null when sequential
     * @param shutdownExecutorService  whether this processor owns and must shut down the executor
     * @param streaming                when parallel: aggregate replies in completion order
     * @param stopOnException          stop multicasting further destinations when one fails
     * @param timeout                  total timeout in millis for parallel processing; 0 disables
     * @param onPrepare                optional processor invoked on each exchange copy before sending
     * @param shareUnitOfWork          whether sub exchanges share the original unit of work
     * @param parallelAggregate        whether aggregation itself may run concurrently
     * @param stopOnAggregateException whether an exception from the aggregation strategy stops processing
     */
    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy,
                              boolean parallelProcessing, ExecutorService executorService, boolean shutdownExecutorService, boolean streaming,
                              boolean stopOnException, long timeout, Processor onPrepare, boolean shareUnitOfWork,
                              boolean parallelAggregate, boolean stopOnAggregateException) {
        notNull(camelContext, "camelContext");
        this.camelContext = camelContext;
        this.processors = processors;
        this.aggregationStrategy = aggregationStrategy;
        this.executorService = executorService;
        this.shutdownExecutorService = shutdownExecutorService;
        this.streaming = streaming;
        this.stopOnException = stopOnException;
        // must enable parallel if executor service is provided
        this.parallelProcessing = parallelProcessing || executorService != null;
        this.timeout = timeout;
        this.onPrepare = onPrepare;
        this.shareUnitOfWork = shareUnitOfWork;
        this.parallelAggregate = parallelAggregate;
        this.stopOnAggregateException = stopOnAggregateException;
    }
208
209    @Override
210    public String toString() {
211        return "Multicast[" + getProcessors() + "]";
212    }
213
    // id of this processor (IdAware contract)
    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    // label shown when tracing this node (Traceable contract)
    public String getTraceLabel() {
        return "multicast";
    }

    public CamelContext getCamelContext() {
        return camelContext;
    }
229
    /**
     * Processes the exchange synchronously by delegating to the async variant and waiting for completion.
     */
    public void process(Exchange exchange) throws Exception {
        AsyncProcessorHelper.process(this, exchange);
    }
233
234    public boolean process(Exchange exchange, AsyncCallback callback) {
235        final AtomicExchange result = new AtomicExchange();
236        Iterable<ProcessorExchangePair> pairs = null;
237
238        try {
239            boolean sync = true;
240
241            pairs = createProcessorExchangePairs(exchange);
242
243            if (isParallelProcessing()) {
244                // ensure an executor is set when running in parallel
245                ObjectHelper.notNull(executorService, "executorService", this);
246                doProcessParallel(exchange, result, pairs, isStreaming(), callback);
247            } else {
248                sync = doProcessSequential(exchange, result, pairs, callback);
249            }
250
251            if (!sync) {
252                // the remainder of the multicast will be completed async
253                // so we break out now, then the callback will be invoked which then continue routing from where we left here
254                return false;
255            }
256        } catch (Throwable e) {
257            exchange.setException(e);
258            // unexpected exception was thrown, maybe from iterator etc. so do not regard as exhausted
259            // and do the done work
260            doDone(exchange, null, pairs, callback, true, false);
261            return true;
262        }
263
264        // multicasting was processed successfully
265        // and do the done work
266        Exchange subExchange = result.get() != null ? result.get() : null;
267        doDone(exchange, subExchange, pairs, callback, true, true);
268        return true;
269    }
270
    /**
     * Processes the exchange pairs in parallel on the configured executor service while a
     * separate on-the-fly aggregation task aggregates the replies as they finish.
     * <p/>
     * In streaming mode replies are aggregated in completion order; otherwise in the order
     * the tasks were submitted. The caller thread blocks on a latch until aggregation is done.
     *
     * @param original  the original exchange being multicasted
     * @param result    holder for the aggregated result exchange
     * @param pairs     the processor/exchange pairs to process
     * @param streaming whether to aggregate in completion order (<tt>true</tt>) or submit order
     * @param callback  the async callback (parallel processing is synchronous from the caller's
     *                  view, so the callback is not invoked from here)
     * @throws Exception if task submission or the on-the-fly aggregation failed
     */
    protected void doProcessParallel(final Exchange original, final AtomicExchange result, final Iterable<ProcessorExchangePair> pairs,
                                     final boolean streaming, final AsyncCallback callback) throws Exception {

        ObjectHelper.notNull(executorService, "ExecutorService", this);
        ObjectHelper.notNull(aggregateExecutorService, "AggregateExecutorService", this);

        final CompletionService<Exchange> completion;
        if (streaming) {
            // execute tasks in parallel+streaming and aggregate in the order they are finished (out of order sequence)
            completion = new ExecutorCompletionService<Exchange>(executorService);
        } else {
            // execute tasks in parallel and aggregate in the order the tasks are submitted (in order sequence)
            completion = new SubmitOrderedCompletionService<Exchange>(executorService);
        }

        // number of tasks submitted so far; read by the aggregation task to know when it is done
        final AtomicInteger total = new AtomicInteger(0);
        final Iterator<ProcessorExchangePair> it = pairs.iterator();

        if (it.hasNext()) {
            // when parallel then aggregate on the fly
            final AtomicBoolean running = new AtomicBoolean(true);
            final AtomicBoolean allTasksSubmitted = new AtomicBoolean();
            final CountDownLatch aggregationOnTheFlyDone = new CountDownLatch(1);
            final AtomicException executionException = new AtomicException();

            // issue task to execute in separate thread so it can aggregate on-the-fly
            // while we submit new tasks, and those tasks complete concurrently
            // this allows us to optimize work and reduce memory consumption
            final AggregateOnTheFlyTask aggregateOnTheFlyTask = new AggregateOnTheFlyTask(result, original, total, completion, running,
                    aggregationOnTheFlyDone, allTasksSubmitted, executionException);
            final AtomicBoolean aggregationTaskSubmitted = new AtomicBoolean();

            LOG.trace("Starting to submit parallel tasks");

            try {
                while (it.hasNext()) {
                    final ProcessorExchangePair pair = it.next();
                    // in case the iterator returns null then continue to next
                    if (pair == null) {
                        continue;
                    }

                    final Exchange subExchange = pair.getExchange();
                    updateNewExchange(subExchange, total.intValue(), pairs, it);

                    completion.submit(new Callable<Exchange>() {
                        public Exchange call() throws Exception {
                            // start the aggregation task at this stage only in order not to pile up too many threads
                            if (aggregationTaskSubmitted.compareAndSet(false, true)) {
                                // but only submit the aggregation task once
                                aggregateExecutorService.submit(aggregateOnTheFlyTask);
                            }

                            if (!running.get()) {
                                // do not start processing the task if we are not running
                                // (eg a previous task stopped us via stopOnException, or we timed out)
                                return subExchange;
                            }

                            try {
                                doProcessParallel(pair);
                            } catch (Throwable e) {
                                // store the exception on the sub exchange so it is handled below and during aggregation
                                subExchange.setException(e);
                            }

                            // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                            Integer number = getExchangeIndex(subExchange);
                            boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, LOG);
                            if (stopOnException && !continueProcessing) {
                                // signal to stop running
                                running.set(false);
                                // throw caused exception
                                if (subExchange.getException() != null) {
                                    // wrap in exception to explain where it failed
                                    CamelExchangeException cause = new CamelExchangeException("Parallel processing failed for number " + number, subExchange, subExchange.getException());
                                    subExchange.setException(cause);
                                }
                            }

                            LOG.trace("Parallel processing complete for exchange: {}", subExchange);
                            return subExchange;
                        }
                    });

                    total.incrementAndGet();
                }
            } catch (Throwable e) {
                // The methods it.hasNext and it.next can throw RuntimeExceptions when custom iterators are implemented.
                // We have to catch the exception here otherwise the aggregator threads would pile up.
                if (e instanceof Exception) {
                    executionException.set((Exception) e);
                } else {
                    executionException.set(ObjectHelper.wrapRuntimeCamelException(e));
                }
            }

            // signal all tasks has been submitted
            LOG.trace("Signaling that all {} tasks has been submitted.", total.get());
            allTasksSubmitted.set(true);

            // it is too hard to do parallel async routing, so we let the caller thread be synchronous
            // and have it pick up the replies and do the aggregation (eg we use a latch to wait)
            // wait for aggregation to be done
            LOG.debug("Waiting for on-the-fly aggregation to complete aggregating {} responses for exchangeId: {}", total.get(), original.getExchangeId());
            aggregationOnTheFlyDone.await();

            // did we fail for whatever reason, if so throw that caused exception
            if (executionException.get() != null) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Parallel processing failed due {}", executionException.get().getMessage());
                }
                throw executionException.get();
            }
        }

        // now everything is okay so we are done
        LOG.debug("Done parallel processing {} exchanges", total);
    }
388
389    /**
390     * Boss worker to control aggregate on-the-fly for completed tasks when using parallel processing.
391     * <p/>
392     * This ensures lower memory consumption as we do not need to keep all completed tasks in memory
393     * before we perform aggregation. Instead this separate thread will run and aggregate when new
394     * completed tasks is done.
395     * <p/>
396     * The logic is fairly complex as this implementation has to keep track how far it got, and also
 * signal back to the <i>main</i> thread when it is done, so the <i>main</i> thread can continue
398     * processing when the entire splitting is done.
399     */
    private final class AggregateOnTheFlyTask implements Runnable {

        // holder for the aggregated result exchange
        private final AtomicExchange result;
        // the original exchange being multicasted
        private final Exchange original;
        // total number of worker tasks submitted so far (still growing while submission is ongoing)
        private final AtomicInteger total;
        // completion service the worker tasks report their finished exchanges to
        private final CompletionService<Exchange> completion;
        // flag checked by the worker tasks; set to false to cancel remaining work
        private final AtomicBoolean running;
        // latch the main thread awaits; counted down when this task finishes (success or failure)
        private final CountDownLatch aggregationOnTheFlyDone;
        // set to true by the main thread once it has submitted all worker tasks
        private final AtomicBoolean allTasksSubmitted;
        // holder for any exception to be propagated back to the main thread
        private final AtomicException executionException;

        private AggregateOnTheFlyTask(AtomicExchange result, Exchange original, AtomicInteger total,
                                      CompletionService<Exchange> completion, AtomicBoolean running,
                                      CountDownLatch aggregationOnTheFlyDone, AtomicBoolean allTasksSubmitted,
                                      AtomicException executionException) {
            this.result = result;
            this.original = original;
            this.total = total;
            this.completion = completion;
            this.running = running;
            this.aggregationOnTheFlyDone = aggregationOnTheFlyDone;
            this.allTasksSubmitted = allTasksSubmitted;
            this.executionException = executionException;
        }

        public void run() {
            LOG.trace("Aggregate on the fly task started for exchangeId: {}", original.getExchangeId());

            try {
                aggregateOnTheFly();
            } catch (Throwable e) {
                // remember the exception so the main thread can rethrow it
                if (e instanceof Exception) {
                    executionException.set((Exception) e);
                } else {
                    executionException.set(ObjectHelper.wrapRuntimeCamelException(e));
                }
            } finally {
                // must signal we are done so the latch can open and let the other thread continue processing
                LOG.debug("Signaling we are done aggregating on the fly for exchangeId: {}", original.getExchangeId());
                LOG.trace("Aggregate on the fly task done for exchangeId: {}", original.getExchangeId());
                aggregationOnTheFlyDone.countDown();
            }
        }

        /**
         * Polls completed worker tasks from the completion service and aggregates their replies,
         * until all submitted tasks have been aggregated, a timeout occurs, or processing is
         * stopped due to an exception (stopOnException).
         */
        private void aggregateOnTheFly() throws InterruptedException, ExecutionException {
            final AtomicBoolean timedOut = new AtomicBoolean();
            boolean stoppedOnException = false;
            final StopWatch watch = new StopWatch();
            // number of replies aggregated (or timed out) so far
            final AtomicInteger aggregated = new AtomicInteger();
            boolean done = false;
            // not a for loop as on the fly may still run
            while (!done) {
                // check if we have already aggregate everything
                if (allTasksSubmitted.get() && aggregated.intValue() >= total.get()) {
                    LOG.debug("Done aggregating {} exchanges on the fly.", aggregated);
                    break;
                }

                Future<Exchange> future;
                if (timedOut.get()) {
                    // we are timed out but try to grab if some tasks has been completed
                    // poll will return null if no tasks is present
                    future = completion.poll();
                    LOG.trace("Polled completion task #{} after timeout to grab already completed tasks: {}", aggregated, future);
                } else if (timeout > 0) {
                    // only wait for the time remaining of the configured total timeout
                    long left = timeout - watch.taken();
                    if (left < 0) {
                        left = 0;
                    }
                    LOG.trace("Polling completion task #{} using timeout {} millis.", aggregated, left);
                    future = completion.poll(left, TimeUnit.MILLISECONDS);
                } else {
                    LOG.trace("Polling completion task #{}", aggregated);
                    // we must not block so poll every second
                    future = completion.poll(1, TimeUnit.SECONDS);
                    if (future == null) {
                        // and continue loop which will recheck if we are done
                        continue;
                    }
                }

                if (future == null) {
                    // the poll timed out; run the timeout handling (notifies a timeout-aware strategy etc.)
                    ParallelAggregateTimeoutTask task = new ParallelAggregateTimeoutTask(original, result, completion, aggregated, total, timedOut);
                    if (parallelAggregate) {
                        aggregateExecutorService.submit(task);
                    } else {
                        // in non parallel mode then just run the task
                        task.run();
                    }
                } else {
                    // there is a result to aggregate
                    Exchange subExchange = future.get();

                    // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                    Integer number = getExchangeIndex(subExchange);
                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, LOG);
                    if (stopOnException && !continueProcessing) {
                        // we want to stop on exception and an exception or failure occurred
                        // this is similar to what the pipeline does, so we should do the same to not surprise end users
                        // so we should set the failed exchange as the result and break out
                        result.set(subExchange);
                        stoppedOnException = true;
                        break;
                    }

                    // we got a result so aggregate it
                    ParallelAggregateTask task = new ParallelAggregateTask(result, subExchange, aggregated);
                    if (parallelAggregate) {
                        aggregateExecutorService.submit(task);
                    } else {
                        // in non parallel mode then just run the task
                        task.run();
                    }
                }
            }

            if (timedOut.get() || stoppedOnException) {
                if (timedOut.get()) {
                    LOG.debug("Cancelling tasks due timeout after {} millis.", timeout);
                }
                if (stoppedOnException) {
                    LOG.debug("Cancelling tasks due stopOnException.");
                }
                // cancel tasks as we timed out (its safe to cancel done tasks)
                running.set(false);
            }
        }
    }
528
529    /**
530     * Worker task to aggregate the old and new exchange on-the-fly for completed tasks when using parallel processing.
531     */
532    private final class ParallelAggregateTask implements Runnable {
533
534        private final AtomicExchange result;
535        private final Exchange subExchange;
536        private final AtomicInteger aggregated;
537
538        private ParallelAggregateTask(AtomicExchange result, Exchange subExchange, AtomicInteger aggregated) {
539            this.result = result;
540            this.subExchange = subExchange;
541            this.aggregated = aggregated;
542        }
543
544        @Override
545        public void run() {
546            try {
547                if (parallelAggregate) {
548                    doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
549                } else {
550                    doAggregate(getAggregationStrategy(subExchange), result, subExchange);
551                }
552            } catch (Throwable e) {
553                if (isStopOnAggregateException()) {
554                    throw e;
555                } else {
556                    // wrap in exception to explain where it failed
557                    CamelExchangeException cex = new CamelExchangeException("Parallel processing failed for number " + aggregated.get(), subExchange, e);
558                    subExchange.setException(cex);
559                    LOG.debug(cex.getMessage(), cex);
560                }
561            } finally {
562                aggregated.incrementAndGet();
563            }
564        }
565    }
566
567    /**
568     * Worker task to aggregate the old and new exchange on-the-fly for completed tasks when using parallel processing.
569     */
570    private final class ParallelAggregateTimeoutTask implements Runnable {
571
572        private final Exchange original;
573        private final AtomicExchange result;
574        private final CompletionService<Exchange> completion;
575        private final AtomicInteger aggregated;
576        private final AtomicInteger total;
577        private final AtomicBoolean timedOut;
578
579        private ParallelAggregateTimeoutTask(Exchange original, AtomicExchange result, CompletionService<Exchange> completion,
580                                             AtomicInteger aggregated, AtomicInteger total, AtomicBoolean timedOut) {
581            this.original = original;
582            this.result = result;
583            this.completion = completion;
584            this.aggregated = aggregated;
585            this.total = total;
586            this.timedOut = timedOut;
587        }
588
589        @Override
590        public void run() {
591            AggregationStrategy strategy = getAggregationStrategy(null);
592            if (strategy instanceof DelegateAggregationStrategy) {
593                strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
594            }
595            if (strategy instanceof TimeoutAwareAggregationStrategy) {
596                // notify the strategy we timed out
597                Exchange oldExchange = result.get();
598                if (oldExchange == null) {
599                    // if they all timed out the result may not have been set yet, so use the original exchange
600                    oldExchange = original;
601                }
602                ((TimeoutAwareAggregationStrategy) strategy).timeout(oldExchange, aggregated.intValue(), total.intValue(), timeout);
603            } else {
604                // log a WARN we timed out since it will not be aggregated and the Exchange will be lost
605                LOG.warn("Parallel processing timed out after {} millis for number {}. This task will be cancelled and will not be aggregated.", timeout, aggregated.intValue());
606            }
607            LOG.debug("Timeout occurred after {} millis for number {} task.", timeout, aggregated.intValue());
608            timedOut.set(true);
609
610            // mark that index as timed out, which allows us to try to retrieve
611            // any already completed tasks in the next loop
612            if (completion instanceof SubmitOrderedCompletionService) {
613                ((SubmitOrderedCompletionService<?>) completion).timeoutTask();
614            }
615
616            // we timed out so increment the counter
617            aggregated.incrementAndGet();
618        }
619    }
620
    /**
     * Processes the exchange pairs one by one in the caller thread, aggregating each reply
     * into the result as it completes.
     *
     * @param original the original exchange being multicasted
     * @param result   holder for the aggregated result exchange
     * @param pairs    the processor/exchange pairs to process
     * @param callback the callback used when a step continues asynchronously
     * @return <tt>true</tt> when all steps completed synchronously, <tt>false</tt> when a step
     *         continued asynchronously and the callback takes over
     * @throws Exception if the iteration or a processing step failed
     */
    protected boolean doProcessSequential(Exchange original, AtomicExchange result, Iterable<ProcessorExchangePair> pairs, AsyncCallback callback) throws Exception {
        // number of steps processed so far
        AtomicInteger total = new AtomicInteger();
        Iterator<ProcessorExchangePair> it = pairs.iterator();

        while (it.hasNext()) {
            ProcessorExchangePair pair = it.next();
            // in case the iterator returns null then continue to next
            if (pair == null) {
                continue;
            }
            Exchange subExchange = pair.getExchange();
            updateNewExchange(subExchange, total.get(), pairs, it);

            boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);
            if (!sync) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", pair.getExchange().getExchangeId());
                }
                // the remainder of the multicast will be completed async
                // so we break out now, then the callback will be invoked which then continue routing from where we left here
                return false;
            }

            if (LOG.isTraceEnabled()) {
                LOG.trace("Processing exchangeId: {} is continued being processed synchronously", pair.getExchange().getExchangeId());
            }

            // Decide whether to continue with the multicast or not; similar logic to the Pipeline
            // remember to test for stop on exception and aggregate before copying back results
            boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
            if (stopOnException && !continueProcessing) {
                if (subExchange.getException() != null) {
                    // wrap in exception to explain where it failed
                    CamelExchangeException cause = new CamelExchangeException("Sequential processing failed for number " + total.get(), subExchange, subExchange.getException());
                    subExchange.setException(cause);
                }
                // we want to stop on exception, and the exception was handled by the error handler
                // this is similar to what the pipeline does, so we should do the same to not surprise end users
                // so we should set the failed exchange as the result and be done
                result.set(subExchange);
                return true;
            }

            LOG.trace("Sequential processing complete for number {} exchange: {}", total, subExchange);

            // aggregate this reply into the running result
            if (parallelAggregate) {
                doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
            } else {
                doAggregate(getAggregationStrategy(subExchange), result, subExchange);
            }

            total.incrementAndGet();
        }

        LOG.debug("Done sequential processing {} exchanges", total);

        return true;
    }
679
680    private boolean doProcessSequential(final Exchange original, final AtomicExchange result,
681                                        final Iterable<ProcessorExchangePair> pairs, final Iterator<ProcessorExchangePair> it,
682                                        final ProcessorExchangePair pair, final AsyncCallback callback, final AtomicInteger total) {
683        boolean sync = true;
684
685        final Exchange exchange = pair.getExchange();
686        Processor processor = pair.getProcessor();
687        final Producer producer = pair.getProducer();
688
689        TracedRouteNodes traced = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getTracedRouteNodes() : null;
690
691        try {
692            // prepare tracing starting from a new block
693            if (traced != null) {
694                traced.pushBlock();
695            }
696
697            StopWatch sw = null;
698            if (producer != null) {
699                boolean sending = EventHelper.notifyExchangeSending(exchange.getContext(), exchange, producer.getEndpoint());
700                if (sending) {
701                    sw = new StopWatch();
702                }
703            }
704
705            // compute time taken if sending to another endpoint
706            final StopWatch watch = sw;
707
708            // let the prepared process it, remember to begin the exchange pair
709            AsyncProcessor async = AsyncProcessorConverterHelper.convert(processor);
710            pair.begin();
711            sync = async.process(exchange, new AsyncCallback() {
712                public void done(boolean doneSync) {
713                    // we are done with the exchange pair
714                    pair.done();
715
716                    // okay we are done, so notify the exchange was sent
717                    if (producer != null && watch != null) {
718                        long timeTaken = watch.taken();
719                        Endpoint endpoint = producer.getEndpoint();
720                        // emit event that the exchange was sent to the endpoint
721                        EventHelper.notifyExchangeSent(exchange.getContext(), exchange, endpoint, timeTaken);
722                    }
723
724                    // we only have to handle async completion of the routing slip
725                    if (doneSync) {
726                        return;
727                    }
728
729                    // continue processing the multicast asynchronously
730                    Exchange subExchange = exchange;
731
732                    // Decide whether to continue with the multicast or not; similar logic to the Pipeline
733                    // remember to test for stop on exception and aggregate before copying back results
734                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
735                    if (stopOnException && !continueProcessing) {
736                        if (subExchange.getException() != null) {
737                            // wrap in exception to explain where it failed
738                            subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, subExchange.getException()));
739                        } else {
740                            // we want to stop on exception, and the exception was handled by the error handler
741                            // this is similar to what the pipeline does, so we should do the same to not surprise end users
742                            // so we should set the failed exchange as the result and be done
743                            result.set(subExchange);
744                        }
745                        // and do the done work
746                        doDone(original, subExchange, pairs, callback, false, true);
747                        return;
748                    }
749
750                    try {
751                        if (parallelAggregate) {
752                            doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
753                        } else {
754                            doAggregate(getAggregationStrategy(subExchange), result, subExchange);
755                        }
756                    } catch (Throwable e) {
757                        // wrap in exception to explain where it failed
758                        subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, e));
759                        // and do the done work
760                        doDone(original, subExchange, pairs, callback, false, true);
761                        return;
762                    }
763
764                    total.incrementAndGet();
765
766                    // maybe there are more processors to multicast
767                    while (it.hasNext()) {
768
769                        // prepare and run the next
770                        ProcessorExchangePair pair = it.next();
771                        subExchange = pair.getExchange();
772                        updateNewExchange(subExchange, total.get(), pairs, it);
773                        boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);
774
775                        if (!sync) {
776                            LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", original.getExchangeId());
777                            return;
778                        }
779
780                        // Decide whether to continue with the multicast or not; similar logic to the Pipeline
781                        // remember to test for stop on exception and aggregate before copying back results
782                        continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
783                        if (stopOnException && !continueProcessing) {
784                            if (subExchange.getException() != null) {
785                                // wrap in exception to explain where it failed
786                                subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, subExchange.getException()));
787                            } else {
788                                // we want to stop on exception, and the exception was handled by the error handler
789                                // this is similar to what the pipeline does, so we should do the same to not surprise end users
790                                // so we should set the failed exchange as the result and be done
791                                result.set(subExchange);
792                            }
793                            // and do the done work
794                            doDone(original, subExchange, pairs, callback, false, true);
795                            return;
796                        }
797
798                        // must catch any exceptions from aggregation
799                        try {
800                            if (parallelAggregate) {
801                                doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
802                            } else {
803                                doAggregate(getAggregationStrategy(subExchange), result, subExchange);
804                            }
805                        } catch (Throwable e) {
806                            // wrap in exception to explain where it failed
807                            subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, e));
808                            // and do the done work
809                            doDone(original, subExchange, pairs, callback, false, true);
810                            return;
811                        }
812
813                        total.incrementAndGet();
814                    }
815
816                    // do the done work
817                    subExchange = result.get() != null ? result.get() : null;
818                    doDone(original, subExchange, pairs, callback, false, true);
819                }
820            });
821        } finally {
822            // pop the block so by next round we have the same staring point and thus the tracing looks accurate
823            if (traced != null) {
824                traced.popBlock();
825            }
826        }
827
828        return sync;
829    }
830
831    private void doProcessParallel(final ProcessorExchangePair pair) throws Exception {
832        final Exchange exchange = pair.getExchange();
833        Processor processor = pair.getProcessor();
834        Producer producer = pair.getProducer();
835
836        TracedRouteNodes traced = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getTracedRouteNodes() : null;
837
838        // compute time taken if sending to another endpoint
839        StopWatch watch = null;
840        try {
841            // prepare tracing starting from a new block
842            if (traced != null) {
843                traced.pushBlock();
844            }
845
846            if (producer != null) {
847                boolean sending = EventHelper.notifyExchangeSending(exchange.getContext(), exchange, producer.getEndpoint());
848                if (sending) {
849                    watch = new StopWatch();
850                }
851            }
852            // let the prepared process it, remember to begin the exchange pair
853            AsyncProcessor async = AsyncProcessorConverterHelper.convert(processor);
854            pair.begin();
855            // we invoke it synchronously as parallel async routing is too hard
856            AsyncProcessorHelper.process(async, exchange);
857        } finally {
858            pair.done();
859            // pop the block so by next round we have the same staring point and thus the tracing looks accurate
860            if (traced != null) {
861                traced.popBlock();
862            }
863            if (producer != null && watch != null) {
864                Endpoint endpoint = producer.getEndpoint();
865                long timeTaken = watch.taken();
866                // emit event that the exchange was sent to the endpoint
867                // this is okay to do here in the finally block, as the processing is not using the async routing engine
868                //( we invoke it synchronously as parallel async routing is too hard)
869                EventHelper.notifyExchangeSent(exchange.getContext(), exchange, endpoint, timeTaken);
870            }
871        }
872    }
873
874    /**
875     * Common work which must be done when we are done multicasting.
876     * <p/>
877     * This logic applies for both running synchronous and asynchronous as there are multiple exist points
878     * when using the asynchronous routing engine. And therefore we want the logic in one method instead
879     * of being scattered.
880     *
881     * @param original     the original exchange
882     * @param subExchange  the current sub exchange, can be <tt>null</tt> for the synchronous part
883     * @param pairs        the pairs with the exchanges to process
884     * @param callback     the callback
885     * @param doneSync     the <tt>doneSync</tt> parameter to call on callback
886     * @param forceExhaust whether or not error handling is exhausted
887     */
888    protected void doDone(Exchange original, Exchange subExchange, final Iterable<ProcessorExchangePair> pairs,
889                          AsyncCallback callback, boolean doneSync, boolean forceExhaust) {
890
891        // we are done so close the pairs iterator
892        if (pairs != null && pairs instanceof Closeable) {
893            IOHelper.close((Closeable) pairs, "pairs", LOG);
894        }
895
896        AggregationStrategy strategy = getAggregationStrategy(subExchange);
897        if (strategy instanceof DelegateAggregationStrategy) {
898            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
899        }
900        // invoke the on completion callback
901        if (strategy instanceof CompletionAwareAggregationStrategy) {
902            ((CompletionAwareAggregationStrategy) strategy).onCompletion(subExchange);
903        }
904
905        // cleanup any per exchange aggregation strategy
906        removeAggregationStrategyFromExchange(original);
907
908        // we need to know if there was an exception, and if the stopOnException option was enabled
909        // also we would need to know if any error handler has attempted redelivery and exhausted
910        boolean stoppedOnException = false;
911        boolean exception = false;
912        boolean exhaust = forceExhaust || subExchange != null && (subExchange.getException() != null || ExchangeHelper.isRedeliveryExhausted(subExchange));
913        if (original.getException() != null || subExchange != null && subExchange.getException() != null) {
914            // there was an exception and we stopped
915            stoppedOnException = isStopOnException();
916            exception = true;
917        }
918
919        // must copy results at this point
920        if (subExchange != null) {
921            if (stoppedOnException) {
922                // if we stopped due an exception then only propagate the exception
923                original.setException(subExchange.getException());
924            } else {
925                // copy the current result to original so it will contain this result of this eip
926                ExchangeHelper.copyResults(original, subExchange);
927            }
928        }
929
930        // .. and then if there was an exception we need to configure the redelivery exhaust
931        // for example the noErrorHandler will not cause redelivery exhaust so if this error
932        // handled has been in use, then the exhaust would be false (if not forced)
933        if (exception) {
934            // multicast uses error handling on its output processors and they have tried to redeliver
935            // so we shall signal back to the other error handlers that we are exhausted and they should not
936            // also try to redeliver as we will then do that twice
937            original.setProperty(Exchange.REDELIVERY_EXHAUSTED, exhaust);
938        }
939
940        callback.done(doneSync);
941    }
942
943    /**
944     * Aggregate the {@link Exchange} with the current result.
945     * This method is synchronized and is called directly when parallelAggregate is disabled (by default).
946     *
947     * @param strategy the aggregation strategy to use
948     * @param result   the current result
949     * @param exchange the exchange to be added to the result
950     * @see #doAggregateInternal(org.apache.camel.processor.aggregate.AggregationStrategy, org.apache.camel.util.concurrent.AtomicExchange, org.apache.camel.Exchange)
951     */
952    protected synchronized void doAggregate(AggregationStrategy strategy, AtomicExchange result, Exchange exchange) {
953        doAggregateInternal(strategy, result, exchange);
954    }
955
956    /**
957     * Aggregate the {@link Exchange} with the current result.
958     * This method is unsynchronized and is called directly when parallelAggregate is enabled.
959     * In all other cases, this method is called from the doAggregate which is a synchronized method
960     *
961     * @param strategy the aggregation strategy to use
962     * @param result   the current result
963     * @param exchange the exchange to be added to the result
964     * @see #doAggregate(org.apache.camel.processor.aggregate.AggregationStrategy, org.apache.camel.util.concurrent.AtomicExchange, org.apache.camel.Exchange)
965     */
966    protected void doAggregateInternal(AggregationStrategy strategy, AtomicExchange result, Exchange exchange) {
967        if (strategy != null) {
968            // prepare the exchanges for aggregation
969            Exchange oldExchange = result.get();
970            ExchangeHelper.prepareAggregation(oldExchange, exchange);
971            result.set(strategy.aggregate(oldExchange, exchange));
972        }
973    }
974
975    protected void updateNewExchange(Exchange exchange, int index, Iterable<ProcessorExchangePair> allPairs,
976                                     Iterator<ProcessorExchangePair> it) {
977        exchange.setProperty(Exchange.MULTICAST_INDEX, index);
978        if (it.hasNext()) {
979            exchange.setProperty(Exchange.MULTICAST_COMPLETE, Boolean.FALSE);
980        } else {
981            exchange.setProperty(Exchange.MULTICAST_COMPLETE, Boolean.TRUE);
982        }
983    }
984
    /**
     * Gets the multicast index ({@link Exchange#MULTICAST_INDEX}) stored on the given exchange.
     *
     * @param exchange the exchange
     * @return the index, or <tt>null</tt> if the property is not set
     */
    protected Integer getExchangeIndex(Exchange exchange) {
        return exchange.getProperty(Exchange.MULTICAST_INDEX, Integer.class);
    }
988
    /**
     * Creates one {@link ProcessorExchangePair} per configured processor, each with a correlated
     * copy of the given exchange, prepared for multicasting.
     *
     * @param exchange the original exchange to copy for each processor
     * @return the prepared pairs, one per processor
     * @throws Exception if an exception was set on the exchange while creating the pairs
     */
    protected Iterable<ProcessorExchangePair> createProcessorExchangePairs(Exchange exchange) throws Exception {
        List<ProcessorExchangePair> result = new ArrayList<ProcessorExchangePair>(processors.size());

        StreamCache streamCache = null;
        if (isParallelProcessing() && exchange.getIn().getBody() instanceof StreamCache) {
            // in parallel processing case, the stream must be copied, therefore get the stream
            streamCache = (StreamCache) exchange.getIn().getBody();
        }

        int index = 0;
        for (Processor processor : processors) {
            // copy exchange, and do not share the unit of work
            Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);

            if (streamCache != null) {
                // the first copy (index 0) may keep the original stream cache
                if (index > 0) {
                    // copy it otherwise parallel processing is not possible,
                    // because streams can only be read once
                    StreamCache copiedStreamCache = streamCache.copy(copy);
                    if (copiedStreamCache != null) {
                        copy.getIn().setBody(copiedStreamCache);
                    }
                }
            }

            // If the multi-cast processor has an aggregation strategy
            // then the StreamCache created by the child routes must not be 
            // closed by the unit of work of the child route, but by the unit of 
            // work of the parent route or grand parent route or grand grand parent route ...(in case of nesting).
            // Set therefore the unit of work of the  parent route as stream cache unit of work, 
            // if it is not already set.
            if (copy.getProperty(Exchange.STREAM_CACHE_UNIT_OF_WORK) == null) {
                copy.setProperty(Exchange.STREAM_CACHE_UNIT_OF_WORK, exchange.getUnitOfWork());
            }
            // if we share unit of work, we need to prepare the child exchange
            if (isShareUnitOfWork()) {
                prepareSharedUnitOfWork(copy, exchange);
            }

            // and add the pair
            RouteContext routeContext = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getRouteContext() : null;
            result.add(createProcessorExchangePair(index++, processor, copy, routeContext));
        }

        if (exchange.getException() != null) {
            // force any exceptions occurred during creation of exchange pairs to be thrown
            // before returning the answer;
            throw exchange.getException();
        }

        return result;
    }
1041
1042    /**
1043     * Creates the {@link ProcessorExchangePair} which holds the processor and exchange to be send out.
1044     * <p/>
1045     * You <b>must</b> use this method to create the instances of {@link ProcessorExchangePair} as they
1046     * need to be specially prepared before use.
1047     *
1048     * @param index        the index
1049     * @param processor    the processor
1050     * @param exchange     the exchange
1051     * @param routeContext the route context
1052     * @return prepared for use
1053     */
1054    protected ProcessorExchangePair createProcessorExchangePair(int index, Processor processor, Exchange exchange,
1055                                                                RouteContext routeContext) {
1056        Processor prepared = processor;
1057
1058        // set property which endpoint we send to
1059        setToEndpoint(exchange, prepared);
1060
1061        // rework error handling to support fine grained error handling
1062        prepared = createErrorHandler(routeContext, exchange, prepared);
1063
1064        // invoke on prepare on the exchange if specified
1065        if (onPrepare != null) {
1066            try {
1067                onPrepare.process(exchange);
1068            } catch (Exception e) {
1069                exchange.setException(e);
1070            }
1071        }
1072        return new DefaultProcessorExchangePair(index, processor, prepared, exchange);
1073    }
1074
    /**
     * Wraps the given processor in a per-output error handler (and a unit of work processor),
     * so redelivery happens on that output alone instead of restarting the whole multicast.
     * Created error handlers are cached per (routeContext, processor) key to preserve memory,
     * except when a parent unit of work is in use.
     *
     * @param routeContext the route context, may be <tt>null</tt>
     * @param exchange     the exchange used to detect try-blocks and parent unit of work
     * @param processor    the processor to wrap
     * @return the wrapped processor
     */
    protected Processor createErrorHandler(RouteContext routeContext, Exchange exchange, Processor processor) {
        Processor answer;

        boolean tryBlock = exchange.getProperty(Exchange.TRY_ROUTE_BLOCK, false, boolean.class);

        // do not wrap in error handler if we are inside a try block
        if (!tryBlock && routeContext != null) {
            // wrap the producer in error handler so we have fine grained error handling on
            // the output side instead of the input side
            // this is needed to support redelivery on that output alone and not doing redelivery
            // for the entire multicast block again which will start from scratch again

            // create key for cache
            final PreparedErrorHandler key = new PreparedErrorHandler(routeContext, processor);

            // lookup cached first to reuse and preserve memory
            answer = errorHandlers.get(key);
            if (answer != null) {
                LOG.trace("Using existing error handler for: {}", processor);
                return answer;
            }

            LOG.trace("Creating error handler for: {}", processor);
            ErrorHandlerFactory builder = routeContext.getRoute().getErrorHandlerBuilder();
            // create error handler (create error handler directly to keep it light weight,
            // instead of using ProcessorDefinition.wrapInErrorHandler)
            try {
                processor = builder.createErrorHandler(routeContext, processor);

                // and wrap in unit of work processor so the copy exchange also can run under UoW
                answer = createUnitOfWorkProcessor(routeContext, processor, exchange);

                boolean child = exchange.getProperty(Exchange.PARENT_UNIT_OF_WORK, UnitOfWork.class) != null;

                // must start the error handler
                ServiceHelper.startServices(answer);

                // here we don't cache the child unit of work
                if (!child) {
                    // add to cache
                    errorHandlers.putIfAbsent(key, answer);
                }

            } catch (Exception e) {
                throw ObjectHelper.wrapRuntimeCamelException(e);
            }
        } else {
            // and wrap in unit of work processor so the copy exchange also can run under UoW
            answer = createUnitOfWorkProcessor(routeContext, processor, exchange);
        }

        return answer;
    }
1128
1129    /**
1130     * Strategy to create the unit of work to be used for the sub route
1131     *
1132     * @param routeContext the route context
1133     * @param processor    the processor
1134     * @param exchange     the exchange
1135     * @return the unit of work processor
1136     */
1137    protected Processor createUnitOfWorkProcessor(RouteContext routeContext, Processor processor, Exchange exchange) {
1138        CamelInternalProcessor internal = new CamelInternalProcessor(processor);
1139
1140        // and wrap it in a unit of work so the UoW is on the top, so the entire route will be in the same UoW
1141        UnitOfWork parent = exchange.getProperty(Exchange.PARENT_UNIT_OF_WORK, UnitOfWork.class);
1142        if (parent != null) {
1143            internal.addAdvice(new CamelInternalProcessor.ChildUnitOfWorkProcessorAdvice(routeContext, parent));
1144        } else {
1145            internal.addAdvice(new CamelInternalProcessor.UnitOfWorkProcessorAdvice(routeContext));
1146        }
1147
1148        return internal;
1149    }
1150
1151    /**
1152     * Prepares the exchange for participating in a shared unit of work
1153     * <p/>
1154     * This ensures a child exchange can access its parent {@link UnitOfWork} when it participate
1155     * in a shared unit of work.
1156     *
1157     * @param childExchange  the child exchange
1158     * @param parentExchange the parent exchange
1159     */
1160    protected void prepareSharedUnitOfWork(Exchange childExchange, Exchange parentExchange) {
1161        childExchange.setProperty(Exchange.PARENT_UNIT_OF_WORK, parentExchange.getUnitOfWork());
1162    }
1163
1164    protected void doStart() throws Exception {
1165        if (isParallelProcessing() && executorService == null) {
1166            throw new IllegalArgumentException("ParallelProcessing is enabled but ExecutorService has not been set");
1167        }
1168        if (timeout > 0 && !isParallelProcessing()) {
1169            throw new IllegalArgumentException("Timeout is used but ParallelProcessing has not been enabled");
1170        }
1171        if (isParallelProcessing() && aggregateExecutorService == null) {
1172            // use unbounded thread pool so we ensure the aggregate on-the-fly task always will have assigned a thread
1173            // and run the tasks when the task is submitted. If not then the aggregate task may not be able to run
1174            // and signal completion during processing, which would lead to what would appear as a dead-lock or a slow processing
1175            String name = getClass().getSimpleName() + "-AggregateTask";
1176            aggregateExecutorService = createAggregateExecutorService(name);
1177        }
1178        if (aggregationStrategy instanceof CamelContextAware) {
1179            ((CamelContextAware) aggregationStrategy).setCamelContext(camelContext);
1180        }
1181
1182        ServiceHelper.startServices(aggregationStrategy, processors);
1183    }
1184
1185    /**
1186     * Strategy to create the thread pool for the aggregator background task which waits for and aggregates
1187     * completed tasks when running in parallel mode.
1188     *
1189     * @param name  the suggested name for the background thread
1190     * @return the thread pool
1191     */
1192    protected synchronized ExecutorService createAggregateExecutorService(String name) {
1193        // use a cached thread pool so we each on-the-fly task has a dedicated thread to process completions as they come in
1194        return camelContext.getExecutorServiceManager().newCachedThreadPool(this, name);
1195    }
1196
    /**
     * Stops the processors, the cached error handlers and the aggregation strategy.
     * The error handler cache itself is kept; it is only cleared on shutdown.
     */
    @Override
    protected void doStop() throws Exception {
        ServiceHelper.stopServices(processors, errorHandlers, aggregationStrategy);
    }
1201
    /**
     * Shuts down the processors, cached error handlers and aggregation strategy,
     * clears the error handler cache, and shuts down the thread pools
     * (the main pool only when this processor owns it).
     */
    @Override
    protected void doShutdown() throws Exception {
        ServiceHelper.stopAndShutdownServices(processors, errorHandlers, aggregationStrategy);
        // only clear error handlers when shutting down
        errorHandlers.clear();

        if (shutdownExecutorService && executorService != null) {
            getCamelContext().getExecutorServiceManager().shutdownNow(executorService);
        }
        if (aggregateExecutorService != null) {
            getCamelContext().getExecutorServiceManager().shutdownNow(aggregateExecutorService);
        }
    }
1215
1216    protected static void setToEndpoint(Exchange exchange, Processor processor) {
1217        if (processor instanceof Producer) {
1218            Producer producer = (Producer) processor;
1219            exchange.setProperty(Exchange.TO_ENDPOINT, producer.getEndpoint().getEndpointUri());
1220        }
1221    }
1222
1223    protected AggregationStrategy getAggregationStrategy(Exchange exchange) {
1224        AggregationStrategy answer = null;
1225
1226        // prefer to use per Exchange aggregation strategy over a global strategy
1227        if (exchange != null) {
1228            Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
1229            Map<Object, AggregationStrategy> map = CastUtils.cast(property);
1230            if (map != null) {
1231                answer = map.get(this);
1232            }
1233        }
1234        if (answer == null) {
1235            // fallback to global strategy
1236            answer = getAggregationStrategy();
1237        }
1238        return answer;
1239    }
1240
1241    /**
1242     * Sets the given {@link org.apache.camel.processor.aggregate.AggregationStrategy} on the {@link Exchange}.
1243     *
1244     * @param exchange            the exchange
1245     * @param aggregationStrategy the strategy
1246     */
1247    protected void setAggregationStrategyOnExchange(Exchange exchange, AggregationStrategy aggregationStrategy) {
1248        Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
1249        Map<Object, AggregationStrategy> map = CastUtils.cast(property);
1250        if (map == null) {
1251            map = new ConcurrentHashMap<Object, AggregationStrategy>();
1252        } else {
1253            // it is not safe to use the map directly as the exchange doesn't have the deep copy of it's properties
1254            // we just create a new copy if we need to change the map
1255            map = new ConcurrentHashMap<Object, AggregationStrategy>(map);
1256        }
1257        // store the strategy using this processor as the key
1258        // (so we can store multiple strategies on the same exchange)
1259        map.put(this, aggregationStrategy);
1260        exchange.setProperty(Exchange.AGGREGATION_STRATEGY, map);
1261    }
1262
1263    /**
1264     * Removes the associated {@link org.apache.camel.processor.aggregate.AggregationStrategy} from the {@link Exchange}
1265     * which must be done after use.
1266     *
1267     * @param exchange the current exchange
1268     */
1269    protected void removeAggregationStrategyFromExchange(Exchange exchange) {
1270        Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
1271        Map<Object, AggregationStrategy> map = CastUtils.cast(property);
1272        if (map == null) {
1273            return;
1274        }
1275        // remove the strategy using this processor as the key
1276        map.remove(this);
1277    }
1278
1279    /**
1280     * Is the multicast processor working in streaming mode?
1281     * <p/>
1282     * In streaming mode:
1283     * <ul>
1284     * <li>we use {@link Iterable} to ensure we can send messages as soon as the data becomes available</li>
1285     * <li>for parallel processing, we start aggregating responses as they get send back to the processor;
1286     * this means the {@link org.apache.camel.processor.aggregate.AggregationStrategy} has to take care of handling out-of-order arrival of exchanges</li>
1287     * </ul>
1288     */
1289    public boolean isStreaming() {
1290        return streaming;
1291    }
1292
1293    /**
1294     * Should the multicast processor stop processing further exchanges in case of an exception occurred?
1295     */
1296    public boolean isStopOnException() {
1297        return stopOnException;
1298    }
1299
1300    /**
1301     * Returns the producers to multicast to
1302     */
1303    public Collection<Processor> getProcessors() {
1304        return processors;
1305    }
1306
1307    /**
1308     * An optional timeout in millis when using parallel processing
1309     */
1310    public long getTimeout() {
1311        return timeout;
1312    }
1313
1314    /**
1315     * Use {@link #getAggregationStrategy(org.apache.camel.Exchange)} instead.
1316     */
1317    public AggregationStrategy getAggregationStrategy() {
1318        return aggregationStrategy;
1319    }
1320
1321    public boolean isParallelProcessing() {
1322        return parallelProcessing;
1323    }
1324
1325    public boolean isParallelAggregate() {
1326        return parallelAggregate;
1327    }
1328
1329    public boolean isStopOnAggregateException() {
1330        return stopOnAggregateException;
1331    }
1332
1333    public boolean isShareUnitOfWork() {
1334        return shareUnitOfWork;
1335    }
1336
1337    public List<Processor> next() {
1338        if (!hasNext()) {
1339            return null;
1340        }
1341        return new ArrayList<Processor>(processors);
1342    }
1343
1344    public boolean hasNext() {
1345        return processors != null && !processors.isEmpty();
1346    }
1347}