001/**
002 * Licensed to the Apache Software Foundation (ASF) under one or more
003 * contributor license agreements.  See the NOTICE file distributed with
004 * this work for additional information regarding copyright ownership.
005 * The ASF licenses this file to You under the Apache License, Version 2.0
006 * (the "License"); you may not use this file except in compliance with
007 * the License.  You may obtain a copy of the License at
008 *
009 *      http://www.apache.org/licenses/LICENSE-2.0
010 *
011 * Unless required by applicable law or agreed to in writing, software
012 * distributed under the License is distributed on an "AS IS" BASIS,
013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
014 * See the License for the specific language governing permissions and
015 * limitations under the License.
016 */
017package org.apache.camel.processor;
018
019import java.io.Closeable;
020import java.util.ArrayList;
021import java.util.Collection;
022import java.util.Iterator;
023import java.util.List;
024import java.util.Map;
025import java.util.concurrent.Callable;
026import java.util.concurrent.CompletionService;
027import java.util.concurrent.ConcurrentHashMap;
028import java.util.concurrent.ConcurrentMap;
029import java.util.concurrent.CountDownLatch;
030import java.util.concurrent.ExecutionException;
031import java.util.concurrent.ExecutorCompletionService;
032import java.util.concurrent.ExecutorService;
033import java.util.concurrent.Future;
034import java.util.concurrent.TimeUnit;
035import java.util.concurrent.atomic.AtomicBoolean;
036import java.util.concurrent.atomic.AtomicInteger;
037
038import org.apache.camel.AsyncCallback;
039import org.apache.camel.AsyncProcessor;
040import org.apache.camel.CamelContext;
041import org.apache.camel.CamelExchangeException;
042import org.apache.camel.Endpoint;
043import org.apache.camel.ErrorHandlerFactory;
044import org.apache.camel.Exchange;
045import org.apache.camel.Navigate;
046import org.apache.camel.Processor;
047import org.apache.camel.Producer;
048import org.apache.camel.StreamCache;
049import org.apache.camel.Traceable;
050import org.apache.camel.processor.aggregate.AggregationStrategy;
051import org.apache.camel.processor.aggregate.CompletionAwareAggregationStrategy;
052import org.apache.camel.processor.aggregate.TimeoutAwareAggregationStrategy;
053import org.apache.camel.spi.IdAware;
054import org.apache.camel.spi.RouteContext;
055import org.apache.camel.spi.TracedRouteNodes;
056import org.apache.camel.spi.UnitOfWork;
057import org.apache.camel.support.ServiceSupport;
058import org.apache.camel.util.AsyncProcessorConverterHelper;
059import org.apache.camel.util.AsyncProcessorHelper;
060import org.apache.camel.util.CastUtils;
061import org.apache.camel.util.EventHelper;
062import org.apache.camel.util.ExchangeHelper;
063import org.apache.camel.util.IOHelper;
064import org.apache.camel.util.KeyValueHolder;
065import org.apache.camel.util.ObjectHelper;
066import org.apache.camel.util.ServiceHelper;
067import org.apache.camel.util.StopWatch;
068import org.apache.camel.util.concurrent.AtomicException;
069import org.apache.camel.util.concurrent.AtomicExchange;
070import org.apache.camel.util.concurrent.SubmitOrderedCompletionService;
071import org.slf4j.Logger;
072import org.slf4j.LoggerFactory;
073
074import static org.apache.camel.util.ObjectHelper.notNull;
075
076
077/**
078 * Implements the Multicast pattern to send a message exchange to a number of
079 * endpoints, each endpoint receiving a copy of the message exchange.
080 *
081 * @version 
082 * @see Pipeline
083 */
084public class MulticastProcessor extends ServiceSupport implements AsyncProcessor, Navigate<Processor>, Traceable, IdAware {
085
086    private static final Logger LOG = LoggerFactory.getLogger(MulticastProcessor.class);
087
088    /**
089     * Class that represent each step in the multicast route to do
090     */
091    static final class DefaultProcessorExchangePair implements ProcessorExchangePair {
092        private final int index;
093        private final Processor processor;
094        private final Processor prepared;
095        private final Exchange exchange;
096
097        private DefaultProcessorExchangePair(int index, Processor processor, Processor prepared, Exchange exchange) {
098            this.index = index;
099            this.processor = processor;
100            this.prepared = prepared;
101            this.exchange = exchange;
102        }
103
104        public int getIndex() {
105            return index;
106        }
107
108        public Exchange getExchange() {
109            return exchange;
110        }
111
112        public Producer getProducer() {
113            if (processor instanceof Producer) {
114                return (Producer) processor;
115            }
116            return null;
117        }
118
119        public Processor getProcessor() {
120            return prepared;
121        }
122
123        public void begin() {
124            // noop
125        }
126
127        public void done() {
128            // noop
129        }
130
131    }
132
133    /**
134     * Class that represents prepared fine grained error handlers when processing multicasted/splitted exchanges
135     * <p/>
136     * See the <tt>createProcessorExchangePair</tt> and <tt>createErrorHandler</tt> methods.
137     */
138    static final class PreparedErrorHandler extends KeyValueHolder<RouteContext, Processor> {
139
140        public PreparedErrorHandler(RouteContext key, Processor value) {
141            super(key, value);
142        }
143
144    }
145
    // optional processor invoked on the copied exchanges before they are processed
    protected final Processor onPrepare;
    private final CamelContext camelContext;
    // processor id (see IdAware)
    private String id;
    // the processors each receiving a copy of the exchange
    private Collection<Processor> processors;
    // optional strategy used to aggregate the replies
    private final AggregationStrategy aggregationStrategy;
    // whether copies are processed concurrently (forced on when an executor service is provided)
    private final boolean parallelProcessing;
    // whether to aggregate replies in completion order (true) instead of submit order (false)
    private final boolean streaming;
    // whether aggregation itself may run concurrently on the aggregate executor
    private final boolean parallelAggregate;
    // whether to stop processing remaining copies when one fails
    private final boolean stopOnException;
    // thread pool used for parallel processing
    private final ExecutorService executorService;
    // whether this processor is responsible for shutting down the executor service
    private final boolean shutdownExecutorService;
    // separate executor running the on-the-fly aggregation task during parallel processing
    private ExecutorService aggregateExecutorService;
    // timeout in millis for parallel processing; only enforced when > 0
    private final long timeout;
    // cache of prepared fine grained error handlers keyed by route context and processor
    private final ConcurrentMap<PreparedErrorHandler, Processor> errorHandlers = new ConcurrentHashMap<PreparedErrorHandler, Processor>();
    // whether the exchange copies share the original exchange's unit of work
    private final boolean shareUnitOfWork;
161
    /**
     * Creates the multicast processor without an aggregation strategy.
     */
    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors) {
        this(camelContext, processors, null);
    }
165
    /**
     * Creates the multicast processor using sequential processing, no timeout and
     * default options.
     */
    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy) {
        this(camelContext, processors, aggregationStrategy, false, null, false, false, false, 0, null, false, false);
    }
169
    /**
     * @deprecated use the constructor that also takes the <tt>parallelAggregate</tt> option
     *             (this variant always uses <tt>parallelAggregate = false</tt>)
     */
    @Deprecated
    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy,
                              boolean parallelProcessing, ExecutorService executorService, boolean shutdownExecutorService,
                              boolean streaming, boolean stopOnException, long timeout, Processor onPrepare, boolean shareUnitOfWork) {
        this(camelContext, processors, aggregationStrategy, parallelProcessing, executorService, shutdownExecutorService,
                streaming, stopOnException, timeout, onPrepare, shareUnitOfWork, false);
    }
177
178    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy,
179                              boolean parallelProcessing, ExecutorService executorService, boolean shutdownExecutorService, boolean streaming,
180                              boolean stopOnException, long timeout, Processor onPrepare, boolean shareUnitOfWork,
181                              boolean parallelAggregate) {
182        notNull(camelContext, "camelContext");
183        this.camelContext = camelContext;
184        this.processors = processors;
185        this.aggregationStrategy = aggregationStrategy;
186        this.executorService = executorService;
187        this.shutdownExecutorService = shutdownExecutorService;
188        this.streaming = streaming;
189        this.stopOnException = stopOnException;
190        // must enable parallel if executor service is provided
191        this.parallelProcessing = parallelProcessing || executorService != null;
192        this.timeout = timeout;
193        this.onPrepare = onPrepare;
194        this.shareUnitOfWork = shareUnitOfWork;
195        this.parallelAggregate = parallelAggregate;
196    }
197
198    @Override
199    public String toString() {
200        return "Multicast[" + getProcessors() + "]";
201    }
202
    /**
     * Gets the processor id (see {@link IdAware}).
     */
    public String getId() {
        return id;
    }

    /**
     * Sets the processor id (see {@link IdAware}).
     */
    public void setId(String id) {
        this.id = id;
    }

    /**
     * Returns the label used when tracing this processor.
     */
    public String getTraceLabel() {
        return "multicast";
    }

    public CamelContext getCamelContext() {
        return camelContext;
    }
218
    /**
     * Processes the exchange synchronously by delegating to the asynchronous
     * {@link #process(Exchange, AsyncCallback)} and waiting for it to complete.
     */
    public void process(Exchange exchange) throws Exception {
        AsyncProcessorHelper.process(this, exchange);
    }
222
223    public boolean process(Exchange exchange, AsyncCallback callback) {
224        final AtomicExchange result = new AtomicExchange();
225        Iterable<ProcessorExchangePair> pairs = null;
226
227        try {
228            boolean sync = true;
229
230            pairs = createProcessorExchangePairs(exchange);
231
232            if (isParallelProcessing()) {
233                // ensure an executor is set when running in parallel
234                ObjectHelper.notNull(executorService, "executorService", this);
235                doProcessParallel(exchange, result, pairs, isStreaming(), callback);
236            } else {
237                sync = doProcessSequential(exchange, result, pairs, callback);
238            }
239
240            if (!sync) {
241                // the remainder of the multicast will be completed async
242                // so we break out now, then the callback will be invoked which then continue routing from where we left here
243                return false;
244            }
245        } catch (Throwable e) {
246            exchange.setException(e);
247            // unexpected exception was thrown, maybe from iterator etc. so do not regard as exhausted
248            // and do the done work
249            doDone(exchange, null, pairs, callback, true, false);
250            return true;
251        }
252
253        // multicasting was processed successfully
254        // and do the done work
255        Exchange subExchange = result.get() != null ? result.get() : null;
256        doDone(exchange, subExchange, pairs, callback, true, true);
257        return true;
258    }
259
    /**
     * Processes the exchange pairs in parallel on the executor service, while replies are
     * aggregated on-the-fly by a dedicated task running on the aggregate executor service.
     * The caller thread blocks on a latch until the on-the-fly aggregation is done.
     *
     * @param original  the original exchange
     * @param result    holder for the aggregated result
     * @param pairs     the processor/exchange pairs to process
     * @param streaming whether to aggregate in completion order (<tt>true</tt>) or submit order (<tt>false</tt>)
     * @param callback  the callback (unused here; the parallel multicast completes synchronously from the caller's view)
     * @throws Exception if processing or on-the-fly aggregation failed
     */
    protected void doProcessParallel(final Exchange original, final AtomicExchange result, final Iterable<ProcessorExchangePair> pairs,
                                     final boolean streaming, final AsyncCallback callback) throws Exception {

        ObjectHelper.notNull(executorService, "ExecutorService", this);
        ObjectHelper.notNull(aggregateExecutorService, "AggregateExecutorService", this);

        final CompletionService<Exchange> completion;
        if (streaming) {
            // execute tasks in parallel+streaming and aggregate in the order they are finished (out of order sequence)
            completion = new ExecutorCompletionService<Exchange>(executorService);
        } else {
            // execute tasks in parallel and aggregate in the order the tasks are submitted (in order sequence)
            completion = new SubmitOrderedCompletionService<Exchange>(executorService);
        }

        final AtomicInteger total = new AtomicInteger(0);
        final Iterator<ProcessorExchangePair> it = pairs.iterator();

        if (it.hasNext()) {
            // when parallel then aggregate on the fly
            final AtomicBoolean running = new AtomicBoolean(true);
            final AtomicBoolean allTasksSubmitted = new AtomicBoolean();
            final CountDownLatch aggregationOnTheFlyDone = new CountDownLatch(1);
            final AtomicException executionException = new AtomicException();

            // issue task to execute in separate thread so it can aggregate on-the-fly
            // while we submit new tasks, and those tasks complete concurrently
            // this allows us to optimize work and reduce memory consumption
            final AggregateOnTheFlyTask aggregateOnTheFlyTask = new AggregateOnTheFlyTask(result, original, total, completion, running,
                    aggregationOnTheFlyDone, allTasksSubmitted, executionException);
            final AtomicBoolean aggregationTaskSubmitted = new AtomicBoolean();

            LOG.trace("Starting to submit parallel tasks");

            while (it.hasNext()) {
                final ProcessorExchangePair pair = it.next();
                // in case the iterator returns null then continue to next
                if (pair == null) {
                    continue;
                }

                final Exchange subExchange = pair.getExchange();
                updateNewExchange(subExchange, total.intValue(), pairs, it);

                completion.submit(new Callable<Exchange>() {
                    public Exchange call() throws Exception {
                        // only start the aggregation task when the first task is being executed, to avoid
                        // starting the aggregation task too early and piling up too many threads
                        if (aggregationTaskSubmitted.compareAndSet(false, true)) {
                            // but only submit the task once
                            aggregateExecutorService.submit(aggregateOnTheFlyTask);
                        }

                        if (!running.get()) {
                            // do not start processing the task if we are not running
                            // (a previous task failed with stopOnException, or we timed out)
                            return subExchange;
                        }

                        try {
                            doProcessParallel(pair);
                        } catch (Throwable e) {
                            subExchange.setException(e);
                        }

                        // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                        Integer number = getExchangeIndex(subExchange);
                        boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, LOG);
                        if (stopOnException && !continueProcessing) {
                            // signal to stop running
                            running.set(false);
                            // throw caused exception
                            if (subExchange.getException() != null) {
                                // wrap in exception to explain where it failed
                                CamelExchangeException cause = new CamelExchangeException("Parallel processing failed for number " + number, subExchange, subExchange.getException());
                                subExchange.setException(cause);
                            }
                        }

                        LOG.trace("Parallel processing complete for exchange: {}", subExchange);
                        return subExchange;
                    }
                });

                total.incrementAndGet();
            }

            // signal that all tasks have been submitted
            LOG.trace("Signaling that all {} tasks has been submitted.", total.get());
            allTasksSubmitted.set(true);

            // it is too hard to do parallel async routing so we let the caller thread be synchronous
            // and have it pickup the replies and do the aggregation (eg we use a latch to wait)
            // wait for aggregation to be done
            LOG.debug("Waiting for on-the-fly aggregation to complete aggregating {} responses for exchangeId: {}", total.get(), original.getExchangeId());
            aggregationOnTheFlyDone.await();

            // did we fail for whatever reason, if so throw that caused exception
            if (executionException.get() != null) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Parallel processing failed due {}", executionException.get().getMessage());
                }
                throw executionException.get();
            }
        }

        // now everything is okay so we are done
        LOG.debug("Done parallel processing {} exchanges", total);
    }
368
369    /**
370     * Boss worker to control aggregate on-the-fly for completed tasks when using parallel processing.
371     * <p/>
372     * This ensures lower memory consumption as we do not need to keep all completed tasks in memory
373     * before we perform aggregation. Instead this separate thread will run and aggregate when new
374     * completed tasks is done.
375     * <p/>
 * The logic is fairly complex as this implementation has to keep track of how far it got, and also
 * signal back to the <i>main</i> thread when it is done, so the <i>main</i> thread can continue
 * processing when the entire splitting is done.
379     */
    private final class AggregateOnTheFlyTask implements Runnable {

        // holder for the aggregated result
        private final AtomicExchange result;
        // the original exchange being multicasted
        private final Exchange original;
        // number of tasks submitted (may still grow while we aggregate)
        private final AtomicInteger total;
        // completed tasks are polled from this completion service
        private final CompletionService<Exchange> completion;
        // cleared to signal the submitted tasks to stop processing
        private final AtomicBoolean running;
        // latch the main thread waits on until aggregation has finished
        private final CountDownLatch aggregationOnTheFlyDone;
        // set by the main thread when no more tasks will be submitted
        private final AtomicBoolean allTasksSubmitted;
        // holds any exception so the main thread can rethrow it
        private final AtomicException executionException;

        private AggregateOnTheFlyTask(AtomicExchange result, Exchange original, AtomicInteger total,
                                      CompletionService<Exchange> completion, AtomicBoolean running,
                                      CountDownLatch aggregationOnTheFlyDone, AtomicBoolean allTasksSubmitted,
                                      AtomicException executionException) {
            this.result = result;
            this.original = original;
            this.total = total;
            this.completion = completion;
            this.running = running;
            this.aggregationOnTheFlyDone = aggregationOnTheFlyDone;
            this.allTasksSubmitted = allTasksSubmitted;
            this.executionException = executionException;
        }

        public void run() {
            LOG.trace("Aggregate on the fly task started for exchangeId: {}", original.getExchangeId());

            try {
                aggregateOnTheFly();
            } catch (Throwable e) {
                // capture the failure so the main thread can rethrow it after the latch opens
                if (e instanceof Exception) {
                    executionException.set((Exception) e);
                } else {
                    executionException.set(ObjectHelper.wrapRuntimeCamelException(e));
                }
            } finally {
                // must signal we are done so the latch can open and let the other thread continue processing
                LOG.debug("Signaling we are done aggregating on the fly for exchangeId: {}", original.getExchangeId());
                LOG.trace("Aggregate on the fly task done for exchangeId: {}", original.getExchangeId());
                aggregationOnTheFlyDone.countDown();
            }
        }

        // Polls completed tasks and aggregates them until all submitted tasks are
        // accounted for, we time out, or we stop on an exception.
        private void aggregateOnTheFly() throws InterruptedException, ExecutionException {
            final AtomicBoolean timedOut = new AtomicBoolean();
            boolean stoppedOnException = false;
            final StopWatch watch = new StopWatch();
            final AtomicInteger aggregated = new AtomicInteger();
            boolean done = false;
            // not a for loop as on the fly may still run
            while (!done) {
                // check if we have already aggregate everything
                if (allTasksSubmitted.get() && aggregated.intValue() >= total.get()) {
                    LOG.debug("Done aggregating {} exchanges on the fly.", aggregated);
                    break;
                }

                Future<Exchange> future;
                if (timedOut.get()) {
                    // we are timed out but try to grab if some tasks has been completed
                    // poll will return null if no tasks is present
                    future = completion.poll();
                    LOG.trace("Polled completion task #{} after timeout to grab already completed tasks: {}", aggregated, future);
                } else if (timeout > 0) {
                    // poll with the remaining portion of the configured timeout
                    long left = timeout - watch.taken();
                    if (left < 0) {
                        left = 0;
                    }
                    LOG.trace("Polling completion task #{} using timeout {} millis.", aggregated, left);
                    future = completion.poll(left, TimeUnit.MILLISECONDS);
                } else {
                    LOG.trace("Polling completion task #{}", aggregated);
                    // we must not block so poll every second
                    future = completion.poll(1, TimeUnit.SECONDS);
                    if (future == null) {
                        // and continue loop which will recheck if we are done
                        continue;
                    }
                }

                if (future == null) {
                    // the poll gave nothing within the timeout window: handle the timeout
                    ParallelAggregateTimeoutTask task = new ParallelAggregateTimeoutTask(original, result, completion, aggregated, total, timedOut);
                    if (parallelAggregate) {
                        aggregateExecutorService.submit(task);
                    } else {
                        // in non parallel mode then just run the task
                        task.run();
                    }
                } else {
                    // there is a result to aggregate
                    Exchange subExchange = future.get();

                    // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                    Integer number = getExchangeIndex(subExchange);
                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, LOG);
                    if (stopOnException && !continueProcessing) {
                        // we want to stop on exception and an exception or failure occurred
                        // this is similar to what the pipeline does, so we should do the same to not surprise end users
                        // so we should set the failed exchange as the result and break out
                        result.set(subExchange);
                        stoppedOnException = true;
                        break;
                    }

                    // we got a result so aggregate it
                    ParallelAggregateTask task = new ParallelAggregateTask(result, subExchange, aggregated);
                    if (parallelAggregate) {
                        aggregateExecutorService.submit(task);
                    } else {
                        // in non parallel mode then just run the task
                        task.run();
                    }
                }
            }

            if (timedOut.get() || stoppedOnException) {
                if (timedOut.get()) {
                    LOG.debug("Cancelling tasks due timeout after {} millis.", timeout);
                }
                if (stoppedOnException) {
                    LOG.debug("Cancelling tasks due stopOnException.");
                }
                // cancel tasks as we timed out (its safe to cancel done tasks)
                running.set(false);
            }
        }
    }
508
509    /**
510     * Worker task to aggregate the old and new exchange on-the-fly for completed tasks when using parallel processing.
511     */
512    private final class ParallelAggregateTask implements Runnable {
513
514        private final AtomicExchange result;
515        private final Exchange subExchange;
516        private final AtomicInteger aggregated;
517
518        private ParallelAggregateTask(AtomicExchange result, Exchange subExchange, AtomicInteger aggregated) {
519            this.result = result;
520            this.subExchange = subExchange;
521            this.aggregated = aggregated;
522        }
523
524        @Override
525        public void run() {
526            try {
527                if (parallelAggregate) {
528                    doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
529                } else {
530                    doAggregate(getAggregationStrategy(subExchange), result, subExchange);
531                }
532            } catch (Throwable e) {
533                // wrap in exception to explain where it failed
534                subExchange.setException(new CamelExchangeException("Parallel processing failed for number " + aggregated.get(), subExchange, e));
535            } finally {
536                aggregated.incrementAndGet();
537            }
538        }
539    }
540
541    /**
542     * Worker task to aggregate the old and new exchange on-the-fly for completed tasks when using parallel processing.
543     */
544    private final class ParallelAggregateTimeoutTask implements Runnable {
545
546        private final Exchange original;
547        private final AtomicExchange result;
548        private final CompletionService<Exchange> completion;
549        private final AtomicInteger aggregated;
550        private final AtomicInteger total;
551        private final AtomicBoolean timedOut;
552
553        private ParallelAggregateTimeoutTask(Exchange original, AtomicExchange result, CompletionService<Exchange> completion,
554                                             AtomicInteger aggregated, AtomicInteger total, AtomicBoolean timedOut) {
555            this.original = original;
556            this.result = result;
557            this.completion = completion;
558            this.aggregated = aggregated;
559            this.total = total;
560            this.timedOut = timedOut;
561        }
562
563        @Override
564        public void run() {
565            AggregationStrategy strategy = getAggregationStrategy(null);
566            if (strategy instanceof TimeoutAwareAggregationStrategy) {
567                // notify the strategy we timed out
568                Exchange oldExchange = result.get();
569                if (oldExchange == null) {
570                    // if they all timed out the result may not have been set yet, so use the original exchange
571                    oldExchange = original;
572                }
573                ((TimeoutAwareAggregationStrategy) strategy).timeout(oldExchange, aggregated.intValue(), total.intValue(), timeout);
574            } else {
575                // log a WARN we timed out since it will not be aggregated and the Exchange will be lost
576                LOG.warn("Parallel processing timed out after {} millis for number {}. This task will be cancelled and will not be aggregated.", timeout, aggregated.intValue());
577            }
578            LOG.debug("Timeout occurred after {} millis for number {} task.", timeout, aggregated.intValue());
579            timedOut.set(true);
580
581            // mark that index as timed out, which allows us to try to retrieve
582            // any already completed tasks in the next loop
583            if (completion instanceof SubmitOrderedCompletionService) {
584                ((SubmitOrderedCompletionService<?>) completion).timeoutTask();
585            }
586
587            // we timed out so increment the counter
588            aggregated.incrementAndGet();
589        }
590    }
591
    /**
     * Processes the exchange pairs one by one in the caller thread, aggregating each
     * reply as it completes.
     *
     * @param original the original exchange
     * @param result   holder for the aggregated result
     * @param pairs    the processor/exchange pairs to process
     * @param callback the callback used if processing continues asynchronously
     * @return <tt>true</tt> if all pairs were processed synchronously, <tt>false</tt> if
     *         processing continues asynchronously and the callback finishes the multicast
     * @throws Exception if an error occurred while processing
     */
    protected boolean doProcessSequential(Exchange original, AtomicExchange result, Iterable<ProcessorExchangePair> pairs, AsyncCallback callback) throws Exception {
        AtomicInteger total = new AtomicInteger();
        Iterator<ProcessorExchangePair> it = pairs.iterator();

        while (it.hasNext()) {
            ProcessorExchangePair pair = it.next();
            // in case the iterator returns null then continue to next
            if (pair == null) {
                continue;
            }
            Exchange subExchange = pair.getExchange();
            updateNewExchange(subExchange, total.get(), pairs, it);

            boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);
            if (!sync) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", pair.getExchange().getExchangeId());
                }
                // the remainder of the multicast will be completed async
                // so we break out now, then the callback will be invoked which then continue routing from where we left here
                return false;
            }

            if (LOG.isTraceEnabled()) {
                LOG.trace("Processing exchangeId: {} is continued being processed synchronously", pair.getExchange().getExchangeId());
            }

            // Decide whether to continue with the multicast or not; similar logic to the Pipeline
            // remember to test for stop on exception and aggregate before copying back results
            boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
            if (stopOnException && !continueProcessing) {
                if (subExchange.getException() != null) {
                    // wrap in exception to explain where it failed
                    CamelExchangeException cause = new CamelExchangeException("Sequential processing failed for number " + total.get(), subExchange, subExchange.getException());
                    subExchange.setException(cause);
                }
                // we want to stop on exception, and the exception was handled by the error handler
                // this is similar to what the pipeline does, so we should do the same to not surprise end users
                // so we should set the failed exchange as the result and be done
                result.set(subExchange);
                return true;
            }

            LOG.trace("Sequential processing complete for number {} exchange: {}", total, subExchange);

            // aggregate the reply before moving on to the next pair
            if (parallelAggregate) {
                doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
            } else {
                doAggregate(getAggregationStrategy(subExchange), result, subExchange);
            }
            
            total.incrementAndGet();
        }

        LOG.debug("Done sequential processing {} exchanges", total);

        return true;
    }
650
    /**
     * Processes the given pair sequentially, and — if the processor completed asynchronously —
     * continues processing all remaining pairs from within the async callback.
     *
     * @param original the original exchange (holder of the final result)
     * @param result   the accumulated aggregation result so far
     * @param pairs    all processor/exchange pairs (closed in doDone when finished)
     * @param it       iterator over the remaining pairs
     * @param pair     the current pair to process
     * @param callback the callback to invoke when the whole multicast is done
     * @param total    running count of pairs processed so far
     * @return <tt>true</tt> if this pair was processed synchronously, <tt>false</tt> if processing
     *         continues asynchronously (the callback then takes over and eventually calls doDone)
     */
    private boolean doProcessSequential(final Exchange original, final AtomicExchange result,
                                        final Iterable<ProcessorExchangePair> pairs, final Iterator<ProcessorExchangePair> it,
                                        final ProcessorExchangePair pair, final AsyncCallback callback, final AtomicInteger total) {
        boolean sync = true;

        final Exchange exchange = pair.getExchange();
        Processor processor = pair.getProcessor();
        final Producer producer = pair.getProducer();

        TracedRouteNodes traced = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getTracedRouteNodes() : null;

        // compute time taken if sending to another endpoint (watch is non-null iff producer is non-null)
        final StopWatch watch = producer != null ? new StopWatch() : null;

        try {
            // prepare tracing starting from a new block
            if (traced != null) {
                traced.pushBlock();
            }

            if (producer != null) {
                EventHelper.notifyExchangeSending(exchange.getContext(), exchange, producer.getEndpoint());
            }
            // let the prepared process it, remember to begin the exchange pair
            AsyncProcessor async = AsyncProcessorConverterHelper.convert(processor);
            pair.begin();
            sync = async.process(exchange, new AsyncCallback() {
                public void done(boolean doneSync) {
                    // we are done with the exchange pair
                    pair.done();

                    // okay we are done, so notify the exchange was sent
                    if (producer != null) {
                        // watch is guaranteed non-null here as it was created iff producer != null
                        long timeTaken = watch.stop();
                        Endpoint endpoint = producer.getEndpoint();
                        // emit event that the exchange was sent to the endpoint
                        EventHelper.notifyExchangeSent(exchange.getContext(), exchange, endpoint, timeTaken);
                    }

                    // we only have to handle async completion of the routing slip;
                    // the synchronous case is handled by the caller's loop
                    if (doneSync) {
                        return;
                    }

                    // continue processing the multicast asynchronously
                    Exchange subExchange = exchange;

                    // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                    // remember to test for stop on exception and aggregate before copying back results
                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
                    if (stopOnException && !continueProcessing) {
                        if (subExchange.getException() != null) {
                            // wrap in exception to explain where it failed
                            subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, subExchange.getException()));
                        } else {
                            // we want to stop on exception, and the exception was handled by the error handler
                            // this is similar to what the pipeline does, so we should do the same to not surprise end users
                            // so we should set the failed exchange as the result and be done
                            result.set(subExchange);
                        }
                        // and do the done work
                        doDone(original, subExchange, pairs, callback, false, true);
                        return;
                    }

                    // must catch any exceptions from aggregation
                    try {
                        if (parallelAggregate) {
                            doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
                        } else {
                            doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                        }
                    } catch (Throwable e) {
                        // wrap in exception to explain where it failed
                        subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, e));
                        // and do the done work
                        doDone(original, subExchange, pairs, callback, false, true);
                        return;
                    }

                    total.incrementAndGet();

                    // maybe there are more processors to multicast
                    while (it.hasNext()) {

                        // prepare and run the next
                        ProcessorExchangePair pair = it.next();
                        subExchange = pair.getExchange();
                        updateNewExchange(subExchange, total.get(), pairs, it);
                        boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);

                        if (!sync) {
                            // the recursive call turned async; its own callback continues the work
                            LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", original.getExchangeId());
                            return;
                        }

                        // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                        // remember to test for stop on exception and aggregate before copying back results
                        continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
                        if (stopOnException && !continueProcessing) {
                            if (subExchange.getException() != null) {
                                // wrap in exception to explain where it failed
                                subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, subExchange.getException()));
                            } else {
                                // we want to stop on exception, and the exception was handled by the error handler
                                // this is similar to what the pipeline does, so we should do the same to not surprise end users
                                // so we should set the failed exchange as the result and be done
                                result.set(subExchange);
                            }
                            // and do the done work
                            doDone(original, subExchange, pairs, callback, false, true);
                            return;
                        }

                        // must catch any exceptions from aggregation
                        try {
                            if (parallelAggregate) {
                                doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
                            } else {
                                doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                            }
                        } catch (Throwable e) {
                            // wrap in exception to explain where it failed
                            subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, e));
                            // and do the done work
                            doDone(original, subExchange, pairs, callback, false, true);
                            return;
                        }

                        total.incrementAndGet();
                    }

                    // do the done work with the aggregated result (may be null if nothing was aggregated)
                    subExchange = result.get() != null ? result.get() : null;
                    doDone(original, subExchange, pairs, callback, false, true);
                }
            });
        } finally {
            // pop the block so by next round we have the same starting point and thus the tracing looks accurate
            if (traced != null) {
                traced.popBlock();
            }
        }

        return sync;
    }
796
797    private void doProcessParallel(final ProcessorExchangePair pair) throws Exception {
798        final Exchange exchange = pair.getExchange();
799        Processor processor = pair.getProcessor();
800        Producer producer = pair.getProducer();
801
802        TracedRouteNodes traced = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getTracedRouteNodes() : null;
803
804        // compute time taken if sending to another endpoint
805        StopWatch watch = null;
806        if (producer != null) {
807            watch = new StopWatch();
808        }
809
810        try {
811            // prepare tracing starting from a new block
812            if (traced != null) {
813                traced.pushBlock();
814            }
815
816            if (producer != null) {
817                EventHelper.notifyExchangeSending(exchange.getContext(), exchange, producer.getEndpoint());
818            }
819            // let the prepared process it, remember to begin the exchange pair
820            AsyncProcessor async = AsyncProcessorConverterHelper.convert(processor);
821            pair.begin();
822            // we invoke it synchronously as parallel async routing is too hard
823            AsyncProcessorHelper.process(async, exchange);
824        } finally {
825            pair.done();
826            // pop the block so by next round we have the same staring point and thus the tracing looks accurate
827            if (traced != null) {
828                traced.popBlock();
829            }
830            if (producer != null) {
831                long timeTaken = watch.stop();
832                Endpoint endpoint = producer.getEndpoint();
833                // emit event that the exchange was sent to the endpoint
834                // this is okay to do here in the finally block, as the processing is not using the async routing engine
835                //( we invoke it synchronously as parallel async routing is too hard)
836                EventHelper.notifyExchangeSent(exchange.getContext(), exchange, endpoint, timeTaken);
837            }
838        }
839    }
840
841    /**
842     * Common work which must be done when we are done multicasting.
843     * <p/>
844     * This logic applies for both running synchronous and asynchronous as there are multiple exist points
845     * when using the asynchronous routing engine. And therefore we want the logic in one method instead
846     * of being scattered.
847     *
848     * @param original     the original exchange
849     * @param subExchange  the current sub exchange, can be <tt>null</tt> for the synchronous part
850     * @param pairs        the pairs with the exchanges to process
851     * @param callback     the callback
852     * @param doneSync     the <tt>doneSync</tt> parameter to call on callback
853     * @param forceExhaust whether or not error handling is exhausted
854     */
855    protected void doDone(Exchange original, Exchange subExchange, final Iterable<ProcessorExchangePair> pairs,
856                          AsyncCallback callback, boolean doneSync, boolean forceExhaust) {
857
858        // we are done so close the pairs iterator
859        if (pairs != null && pairs instanceof Closeable) {
860            IOHelper.close((Closeable) pairs, "pairs", LOG);
861        }
862
863        AggregationStrategy strategy = getAggregationStrategy(subExchange);
864        // invoke the on completion callback
865        if (strategy instanceof CompletionAwareAggregationStrategy) {
866            ((CompletionAwareAggregationStrategy) strategy).onCompletion(subExchange);
867        }
868
869        // cleanup any per exchange aggregation strategy
870        removeAggregationStrategyFromExchange(original);
871
872        // we need to know if there was an exception, and if the stopOnException option was enabled
873        // also we would need to know if any error handler has attempted redelivery and exhausted
874        boolean stoppedOnException = false;
875        boolean exception = false;
876        boolean exhaust = forceExhaust || subExchange != null && (subExchange.getException() != null || ExchangeHelper.isRedeliveryExhausted(subExchange));
877        if (original.getException() != null || subExchange != null && subExchange.getException() != null) {
878            // there was an exception and we stopped
879            stoppedOnException = isStopOnException();
880            exception = true;
881        }
882
883        // must copy results at this point
884        if (subExchange != null) {
885            if (stoppedOnException) {
886                // if we stopped due an exception then only propagate the exception
887                original.setException(subExchange.getException());
888            } else {
889                // copy the current result to original so it will contain this result of this eip
890                ExchangeHelper.copyResults(original, subExchange);
891            }
892        }
893
894        // .. and then if there was an exception we need to configure the redelivery exhaust
895        // for example the noErrorHandler will not cause redelivery exhaust so if this error
896        // handled has been in use, then the exhaust would be false (if not forced)
897        if (exception) {
898            // multicast uses error handling on its output processors and they have tried to redeliver
899            // so we shall signal back to the other error handlers that we are exhausted and they should not
900            // also try to redeliver as we will then do that twice
901            original.setProperty(Exchange.REDELIVERY_EXHAUSTED, exhaust);
902        }
903
904        callback.done(doneSync);
905    }
906
907    /**
908     * Aggregate the {@link Exchange} with the current result.
909     * This method is synchronized and is called directly when parallelAggregate is disabled (by default).
910     *
911     * @param strategy the aggregation strategy to use
912     * @param result   the current result
913     * @param exchange the exchange to be added to the result
914     * @see #doAggregateInternal(org.apache.camel.processor.aggregate.AggregationStrategy, org.apache.camel.util.concurrent.AtomicExchange, org.apache.camel.Exchange)
915     */
916    protected synchronized void doAggregate(AggregationStrategy strategy, AtomicExchange result, Exchange exchange) {
917        doAggregateInternal(strategy, result, exchange);
918    }
919
920    /**
921     * Aggregate the {@link Exchange} with the current result.
922     * This method is unsynchronized and is called directly when parallelAggregate is enabled.
923     * In all other cases, this method is called from the doAggregate which is a synchronized method
924     *
925     * @param strategy the aggregation strategy to use
926     * @param result   the current result
927     * @param exchange the exchange to be added to the result
928     * @see #doAggregate(org.apache.camel.processor.aggregate.AggregationStrategy, org.apache.camel.util.concurrent.AtomicExchange, org.apache.camel.Exchange)
929     */
930    protected void doAggregateInternal(AggregationStrategy strategy, AtomicExchange result, Exchange exchange) {
931        if (strategy != null) {
932            // prepare the exchanges for aggregation
933            Exchange oldExchange = result.get();
934            ExchangeHelper.prepareAggregation(oldExchange, exchange);
935            result.set(strategy.aggregate(oldExchange, exchange));
936        }
937    }
938
939    protected void updateNewExchange(Exchange exchange, int index, Iterable<ProcessorExchangePair> allPairs,
940                                     Iterator<ProcessorExchangePair> it) {
941        exchange.setProperty(Exchange.MULTICAST_INDEX, index);
942        if (it.hasNext()) {
943            exchange.setProperty(Exchange.MULTICAST_COMPLETE, Boolean.FALSE);
944        } else {
945            exchange.setProperty(Exchange.MULTICAST_COMPLETE, Boolean.TRUE);
946        }
947    }
948
    // returns the multicast index previously stored by updateNewExchange, or null if not set
    protected Integer getExchangeIndex(Exchange exchange) {
        return exchange.getProperty(Exchange.MULTICAST_INDEX, Integer.class);
    }
952
    /**
     * Creates one {@link ProcessorExchangePair} per configured processor, each with a
     * correlated copy of the given exchange.
     *
     * @param exchange the original exchange
     * @return the pairs to be processed
     * @throws Exception if an exception occurred while creating an exchange pair (it is rethrown
     *                   from the exchange so callers see the failure before processing starts)
     */
    protected Iterable<ProcessorExchangePair> createProcessorExchangePairs(Exchange exchange) throws Exception {
        List<ProcessorExchangePair> result = new ArrayList<ProcessorExchangePair>(processors.size());

        StreamCache streamCache = null;
        if (isParallelProcessing() && exchange.getIn().getBody() instanceof StreamCache) {
            // in parallel processing case, the stream must be copied, therefore get the stream
            streamCache = (StreamCache) exchange.getIn().getBody();
        }

        int index = 0;
        for (Processor processor : processors) {
            // copy exchange, and do not share the unit of work
            Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);

            if (streamCache != null) {
                if (index > 0) {
                    // copy it otherwise parallel processing is not possible,
                    // because streams can only be read once
                    // (the first copy, index 0, may use the original stream)
                    StreamCache copiedStreamCache = streamCache.copy(copy);
                    if (copiedStreamCache != null) {
                        copy.getIn().setBody(copiedStreamCache);
                    }
                }
            }

            // If the multi-cast processor has an aggregation strategy
            // then the StreamCache created by the child routes must not be
            // closed by the unit of work of the child route, but by the unit of
            // work of the parent route or grand parent route or grand grand parent route ...(in case of nesting).
            // Set therefore the unit of work of the  parent route as stream cache unit of work,
            // if it is not already set.
            if (copy.getProperty(Exchange.STREAM_CACHE_UNIT_OF_WORK) == null) {
                copy.setProperty(Exchange.STREAM_CACHE_UNIT_OF_WORK, exchange.getUnitOfWork());
            }
            // if we share unit of work, we need to prepare the child exchange
            if (isShareUnitOfWork()) {
                prepareSharedUnitOfWork(copy, exchange);
            }

            // and add the pair
            RouteContext routeContext = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getRouteContext() : null;
            result.add(createProcessorExchangePair(index++, processor, copy, routeContext));
        }

        if (exchange.getException() != null) {
            // force any exceptions occurred during creation of exchange pairs to be thrown
            // before returning the answer;
            throw exchange.getException();
        }

        return result;
    }
1005
1006    /**
1007     * Creates the {@link ProcessorExchangePair} which holds the processor and exchange to be send out.
1008     * <p/>
1009     * You <b>must</b> use this method to create the instances of {@link ProcessorExchangePair} as they
1010     * need to be specially prepared before use.
1011     *
1012     * @param index        the index
1013     * @param processor    the processor
1014     * @param exchange     the exchange
1015     * @param routeContext the route context
1016     * @return prepared for use
1017     */
1018    protected ProcessorExchangePair createProcessorExchangePair(int index, Processor processor, Exchange exchange,
1019                                                                RouteContext routeContext) {
1020        Processor prepared = processor;
1021
1022        // set property which endpoint we send to
1023        setToEndpoint(exchange, prepared);
1024
1025        // rework error handling to support fine grained error handling
1026        prepared = createErrorHandler(routeContext, exchange, prepared);
1027
1028        // invoke on prepare on the exchange if specified
1029        if (onPrepare != null) {
1030            try {
1031                onPrepare.process(exchange);
1032            } catch (Exception e) {
1033                exchange.setException(e);
1034            }
1035        }
1036        return new DefaultProcessorExchangePair(index, processor, prepared, exchange);
1037    }
1038
    /**
     * Wraps the given processor in an error handler and a unit of work processor, giving each
     * multicast output its own fine grained error handling on the output side.
     * <p/>
     * Created error handlers are cached (keyed by route context and processor) to reuse and
     * preserve memory; entries created under a child (parent) unit of work are not cached.
     *
     * @param routeContext the route context, can be <tt>null</tt>
     * @param exchange     the exchange
     * @param processor    the processor to wrap
     * @return the wrapped processor
     */
    protected Processor createErrorHandler(RouteContext routeContext, Exchange exchange, Processor processor) {
        Processor answer;

        boolean tryBlock = exchange.getProperty(Exchange.TRY_ROUTE_BLOCK, false, boolean.class);

        // do not wrap in error handler if we are inside a try block
        if (!tryBlock && routeContext != null) {
            // wrap the producer in error handler so we have fine grained error handling on
            // the output side instead of the input side
            // this is needed to support redelivery on that output alone and not doing redelivery
            // for the entire multicast block again which will start from scratch again

            // create key for cache
            final PreparedErrorHandler key = new PreparedErrorHandler(routeContext, processor);

            // lookup cached first to reuse and preserve memory
            answer = errorHandlers.get(key);
            if (answer != null) {
                LOG.trace("Using existing error handler for: {}", processor);
                return answer;
            }

            LOG.trace("Creating error handler for: {}", processor);
            ErrorHandlerFactory builder = routeContext.getRoute().getErrorHandlerBuilder();
            // create error handler (create error handler directly to keep it light weight,
            // instead of using ProcessorDefinition.wrapInErrorHandler)
            try {
                processor = builder.createErrorHandler(routeContext, processor);

                // and wrap in unit of work processor so the copy exchange also can run under UoW
                answer = createUnitOfWorkProcessor(routeContext, processor, exchange);

                boolean child = exchange.getProperty(Exchange.PARENT_UNIT_OF_WORK, UnitOfWork.class) != null;

                // must start the error handler
                ServiceHelper.startServices(answer);

                // here we don't cache the child unit of work
                if (!child) {
                    // add to cache
                    errorHandlers.putIfAbsent(key, answer);
                }

            } catch (Exception e) {
                throw ObjectHelper.wrapRuntimeCamelException(e);
            }
        } else {
            // and wrap in unit of work processor so the copy exchange also can run under UoW
            answer = createUnitOfWorkProcessor(routeContext, processor, exchange);
        }

        return answer;
    }
1092
1093    /**
1094     * Strategy to create the unit of work to be used for the sub route
1095     *
1096     * @param routeContext the route context
1097     * @param processor    the processor
1098     * @param exchange     the exchange
1099     * @return the unit of work processor
1100     */
1101    protected Processor createUnitOfWorkProcessor(RouteContext routeContext, Processor processor, Exchange exchange) {
1102        CamelInternalProcessor internal = new CamelInternalProcessor(processor);
1103
1104        // and wrap it in a unit of work so the UoW is on the top, so the entire route will be in the same UoW
1105        UnitOfWork parent = exchange.getProperty(Exchange.PARENT_UNIT_OF_WORK, UnitOfWork.class);
1106        if (parent != null) {
1107            internal.addAdvice(new CamelInternalProcessor.ChildUnitOfWorkProcessorAdvice(routeContext, parent));
1108        } else {
1109            internal.addAdvice(new CamelInternalProcessor.UnitOfWorkProcessorAdvice(routeContext));
1110        }
1111
1112        return internal;
1113    }
1114
1115    /**
1116     * Prepares the exchange for participating in a shared unit of work
1117     * <p/>
1118     * This ensures a child exchange can access its parent {@link UnitOfWork} when it participate
1119     * in a shared unit of work.
1120     *
1121     * @param childExchange  the child exchange
1122     * @param parentExchange the parent exchange
1123     */
1124    protected void prepareSharedUnitOfWork(Exchange childExchange, Exchange parentExchange) {
1125        childExchange.setProperty(Exchange.PARENT_UNIT_OF_WORK, parentExchange.getUnitOfWork());
1126    }
1127
    /**
     * Validates the configuration and starts the aggregation strategy and the output processors.
     *
     * @throws IllegalArgumentException if parallel processing is enabled without an executor
     *                                  service, or a timeout is used without parallel processing
     */
    protected void doStart() throws Exception {
        // fail fast on invalid configuration combinations
        if (isParallelProcessing() && executorService == null) {
            throw new IllegalArgumentException("ParallelProcessing is enabled but ExecutorService has not been set");
        }
        if (timeout > 0 && !isParallelProcessing()) {
            throw new IllegalArgumentException("Timeout is used but ParallelProcessing has not been enabled");
        }
        if (isParallelProcessing() && aggregateExecutorService == null) {
            // use unbounded thread pool so we ensure the aggregate on-the-fly task always will have assigned a thread
            // and run the tasks when the task is submitted. If not then the aggregate task may not be able to run
            // and signal completion during processing, which would lead to what would appear as a dead-lock or a slow processing
            String name = getClass().getSimpleName() + "-AggregateTask";
            aggregateExecutorService = createAggregateExecutorService(name);
        }
        ServiceHelper.startServices(aggregationStrategy, processors);
    }
1144
1145    /**
1146     * Strategy to create the thread pool for the aggregator background task which waits for and aggregates
1147     * completed tasks when running in parallel mode.
1148     *
1149     * @param name  the suggested name for the background thread
1150     * @return the thread pool
1151     */
1152    protected synchronized ExecutorService createAggregateExecutorService(String name) {
1153        // use a cached thread pool so we each on-the-fly task has a dedicated thread to process completions as they come in
1154        return camelContext.getExecutorServiceManager().newCachedThreadPool(this, name);
1155    }
1156
    @Override
    protected void doStop() throws Exception {
        // stop outputs, cached error handlers and the aggregation strategy;
        // the error handler cache is kept so it can be reused after a restart (cleared only in doShutdown)
        ServiceHelper.stopServices(processors, errorHandlers, aggregationStrategy);
    }
1161
    @Override
    protected void doShutdown() throws Exception {
        ServiceHelper.stopAndShutdownServices(processors, errorHandlers, aggregationStrategy);
        // only clear error handlers when shutting down
        errorHandlers.clear();

        // shut down the main executor only if we own it; the aggregate executor is shut down whenever present
        if (shutdownExecutorService && executorService != null) {
            getCamelContext().getExecutorServiceManager().shutdownNow(executorService);
        }
        if (aggregateExecutorService != null) {
            getCamelContext().getExecutorServiceManager().shutdownNow(aggregateExecutorService);
        }
    }
1175
1176    protected static void setToEndpoint(Exchange exchange, Processor processor) {
1177        if (processor instanceof Producer) {
1178            Producer producer = (Producer) processor;
1179            exchange.setProperty(Exchange.TO_ENDPOINT, producer.getEndpoint().getEndpointUri());
1180        }
1181    }
1182
1183    protected AggregationStrategy getAggregationStrategy(Exchange exchange) {
1184        AggregationStrategy answer = null;
1185
1186        // prefer to use per Exchange aggregation strategy over a global strategy
1187        if (exchange != null) {
1188            Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
1189            Map<Object, AggregationStrategy> map = CastUtils.cast(property);
1190            if (map != null) {
1191                answer = map.get(this);
1192            }
1193        }
1194        if (answer == null) {
1195            // fallback to global strategy
1196            answer = getAggregationStrategy();
1197        }
1198        return answer;
1199    }
1200
1201    /**
1202     * Sets the given {@link org.apache.camel.processor.aggregate.AggregationStrategy} on the {@link Exchange}.
1203     *
1204     * @param exchange            the exchange
1205     * @param aggregationStrategy the strategy
1206     */
1207    protected void setAggregationStrategyOnExchange(Exchange exchange, AggregationStrategy aggregationStrategy) {
1208        Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
1209        Map<Object, AggregationStrategy> map = CastUtils.cast(property);
1210        if (map == null) {
1211            map = new ConcurrentHashMap<Object, AggregationStrategy>();
1212        } else {
1213            // it is not safe to use the map directly as the exchange doesn't have the deep copy of it's properties
1214            // we just create a new copy if we need to change the map
1215            map = new ConcurrentHashMap<Object, AggregationStrategy>(map);
1216        }
1217        // store the strategy using this processor as the key
1218        // (so we can store multiple strategies on the same exchange)
1219        map.put(this, aggregationStrategy);
1220        exchange.setProperty(Exchange.AGGREGATION_STRATEGY, map);
1221    }
1222
1223    /**
1224     * Removes the associated {@link org.apache.camel.processor.aggregate.AggregationStrategy} from the {@link Exchange}
1225     * which must be done after use.
1226     *
1227     * @param exchange the current exchange
1228     */
1229    protected void removeAggregationStrategyFromExchange(Exchange exchange) {
1230        Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
1231        Map<Object, AggregationStrategy> map = CastUtils.cast(property);
1232        if (map == null) {
1233            return;
1234        }
1235        // remove the strategy using this processor as the key
1236        map.remove(this);
1237    }
1238
1239    /**
1240     * Is the multicast processor working in streaming mode?
1241     * <p/>
1242     * In streaming mode:
1243     * <ul>
1244     * <li>we use {@link Iterable} to ensure we can send messages as soon as the data becomes available</li>
1245     * <li>for parallel processing, we start aggregating responses as they get send back to the processor;
1246     * this means the {@link org.apache.camel.processor.aggregate.AggregationStrategy} has to take care of handling out-of-order arrival of exchanges</li>
1247     * </ul>
1248     */
1249    public boolean isStreaming() {
1250        return streaming;
1251    }
1252
1253    /**
1254     * Should the multicast processor stop processing further exchanges in case of an exception occurred?
1255     */
1256    public boolean isStopOnException() {
1257        return stopOnException;
1258    }
1259
1260    /**
1261     * Returns the producers to multicast to
1262     */
1263    public Collection<Processor> getProcessors() {
1264        return processors;
1265    }
1266
1267    /**
1268     * An optional timeout in millis when using parallel processing
1269     */
1270    public long getTimeout() {
1271        return timeout;
1272    }
1273
1274    /**
1275     * Use {@link #getAggregationStrategy(org.apache.camel.Exchange)} instead.
1276     */
1277    public AggregationStrategy getAggregationStrategy() {
1278        return aggregationStrategy;
1279    }
1280
1281    public boolean isParallelProcessing() {
1282        return parallelProcessing;
1283    }
1284
1285    public boolean isParallelAggregate() {
1286        return parallelAggregate;
1287    }
1288
1289    public boolean isShareUnitOfWork() {
1290        return shareUnitOfWork;
1291    }
1292
1293    public List<Processor> next() {
1294        if (!hasNext()) {
1295            return null;
1296        }
1297        return new ArrayList<Processor>(processors);
1298    }
1299
1300    public boolean hasNext() {
1301        return processors != null && !processors.isEmpty();
1302    }
1303}