001/**
002 * Licensed to the Apache Software Foundation (ASF) under one or more
003 * contributor license agreements.  See the NOTICE file distributed with
004 * this work for additional information regarding copyright ownership.
005 * The ASF licenses this file to You under the Apache License, Version 2.0
006 * (the "License"); you may not use this file except in compliance with
007 * the License.  You may obtain a copy of the License at
008 *
009 *      http://www.apache.org/licenses/LICENSE-2.0
010 *
011 * Unless required by applicable law or agreed to in writing, software
012 * distributed under the License is distributed on an "AS IS" BASIS,
013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
014 * See the License for the specific language governing permissions and
015 * limitations under the License.
016 */
017package org.apache.camel.processor;
018
019import java.io.Closeable;
020import java.util.ArrayList;
021import java.util.Collection;
022import java.util.Iterator;
023import java.util.List;
024import java.util.Map;
025import java.util.concurrent.Callable;
026import java.util.concurrent.CompletionService;
027import java.util.concurrent.ConcurrentHashMap;
028import java.util.concurrent.ConcurrentMap;
029import java.util.concurrent.CountDownLatch;
030import java.util.concurrent.ExecutionException;
031import java.util.concurrent.ExecutorCompletionService;
032import java.util.concurrent.ExecutorService;
033import java.util.concurrent.Future;
034import java.util.concurrent.TimeUnit;
035import java.util.concurrent.atomic.AtomicBoolean;
036import java.util.concurrent.atomic.AtomicInteger;
037
038import org.apache.camel.AsyncCallback;
039import org.apache.camel.AsyncProcessor;
040import org.apache.camel.CamelContext;
041import org.apache.camel.CamelExchangeException;
042import org.apache.camel.Endpoint;
043import org.apache.camel.ErrorHandlerFactory;
044import org.apache.camel.Exchange;
045import org.apache.camel.Navigate;
046import org.apache.camel.Processor;
047import org.apache.camel.Producer;
048import org.apache.camel.StreamCache;
049import org.apache.camel.Traceable;
050import org.apache.camel.processor.aggregate.AggregationStrategy;
051import org.apache.camel.processor.aggregate.CompletionAwareAggregationStrategy;
052import org.apache.camel.processor.aggregate.TimeoutAwareAggregationStrategy;
053import org.apache.camel.spi.RouteContext;
054import org.apache.camel.spi.TracedRouteNodes;
055import org.apache.camel.spi.UnitOfWork;
056import org.apache.camel.support.ServiceSupport;
057import org.apache.camel.util.AsyncProcessorConverterHelper;
058import org.apache.camel.util.AsyncProcessorHelper;
059import org.apache.camel.util.CastUtils;
060import org.apache.camel.util.EventHelper;
061import org.apache.camel.util.ExchangeHelper;
062import org.apache.camel.util.IOHelper;
063import org.apache.camel.util.KeyValueHolder;
064import org.apache.camel.util.ObjectHelper;
065import org.apache.camel.util.ServiceHelper;
066import org.apache.camel.util.StopWatch;
067import org.apache.camel.util.concurrent.AtomicException;
068import org.apache.camel.util.concurrent.AtomicExchange;
069import org.apache.camel.util.concurrent.SubmitOrderedCompletionService;
070import org.slf4j.Logger;
071import org.slf4j.LoggerFactory;
072
073import static org.apache.camel.util.ObjectHelper.notNull;
074
075
076/**
077 * Implements the Multicast pattern to send a message exchange to a number of
078 * endpoints, each endpoint receiving a copy of the message exchange.
079 *
080 * @version 
081 * @see Pipeline
082 */
083public class MulticastProcessor extends ServiceSupport implements AsyncProcessor, Navigate<Processor>, Traceable {
084
085    private static final Logger LOG = LoggerFactory.getLogger(MulticastProcessor.class);
086
087    /**
088     * Class that represent each step in the multicast route to do
089     */
090    static final class DefaultProcessorExchangePair implements ProcessorExchangePair {
091        private final int index;
092        private final Processor processor;
093        private final Processor prepared;
094        private final Exchange exchange;
095
096        private DefaultProcessorExchangePair(int index, Processor processor, Processor prepared, Exchange exchange) {
097            this.index = index;
098            this.processor = processor;
099            this.prepared = prepared;
100            this.exchange = exchange;
101        }
102
103        public int getIndex() {
104            return index;
105        }
106
107        public Exchange getExchange() {
108            return exchange;
109        }
110
111        public Producer getProducer() {
112            if (processor instanceof Producer) {
113                return (Producer) processor;
114            }
115            return null;
116        }
117
118        public Processor getProcessor() {
119            return prepared;
120        }
121
122        public void begin() {
123            // noop
124        }
125
126        public void done() {
127            // noop
128        }
129
130    }
131
132    /**
133     * Class that represents prepared fine grained error handlers when processing multicasted/splitted exchanges
134     * <p/>
135     * See the <tt>createProcessorExchangePair</tt> and <tt>createErrorHandler</tt> methods.
136     */
    static final class PreparedErrorHandler extends KeyValueHolder<RouteContext, Processor> {

        /**
         * @param key   the route context the prepared error handler belongs to
         * @param value the processor wrapped by the prepared error handler
         */
        public PreparedErrorHandler(RouteContext key, Processor value) {
            super(key, value);
        }

    }
144
    // optional processor used to prepare each exchange copy before processing (set via constructor)
    protected final Processor onPrepare;
    private final CamelContext camelContext;
    // the processors to multicast the exchange to; non-final, so presumably replaceable after construction — TODO confirm setter
    private Collection<Processor> processors;
    // optional strategy used to aggregate the replies into a single result
    private final AggregationStrategy aggregationStrategy;
    // whether to process the pairs concurrently using the executor service
    private final boolean parallelProcessing;
    // whether to aggregate parallel replies in completion order instead of submission order
    private final boolean streaming;
    // whether aggregation tasks run on the aggregate executor instead of inline in the polling loop
    private final boolean parallelAggregate;
    // whether to stop multicasting when a sub exchange fails
    private final boolean stopOnException;
    // executor used to run the parallel processing tasks
    private final ExecutorService executorService;
    // whether this processor is responsible for shutting down the executor service — handled outside this view, TODO confirm in doStop
    private final boolean shutdownExecutorService;
    // executor used for the on-the-fly aggregation task; non-final as it is set after construction
    private ExecutorService aggregateExecutorService;
    // timeout in millis for parallel processing; 0 or negative means no timeout
    private final long timeout;
    // cache of prepared fine grained error handlers per route context (see PreparedErrorHandler)
    private final ConcurrentMap<PreparedErrorHandler, Processor> errorHandlers = new ConcurrentHashMap<PreparedErrorHandler, Processor>();
    // whether sub exchanges share the unit of work of the original exchange — usage outside this view, TODO confirm
    private final boolean shareUnitOfWork;
159
    /**
     * Creates a sequential multicast without an aggregation strategy.
     */
    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors) {
        this(camelContext, processors, null);
    }
163
    /**
     * Creates a sequential multicast with the given aggregation strategy
     * (no parallel processing, no executor service, no timeout).
     */
    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy) {
        this(camelContext, processors, aggregationStrategy, false, null, false, false, false, 0, null, false, false);
    }
167
    /**
     * @deprecated use the constructor that also takes the <tt>parallelAggregate</tt> flag instead
     */
    @Deprecated
    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy,
                              boolean parallelProcessing, ExecutorService executorService, boolean shutdownExecutorService,
                              boolean streaming, boolean stopOnException, long timeout, Processor onPrepare, boolean shareUnitOfWork) {
        this(camelContext, processors, aggregationStrategy, parallelProcessing, executorService, shutdownExecutorService,
                streaming, stopOnException, timeout, onPrepare, shareUnitOfWork, false);
    }
175
    /**
     * Creates a fully configured multicast processor.
     *
     * @param camelContext            the camel context; must not be <tt>null</tt>
     * @param processors              the processors to multicast the exchange to
     * @param aggregationStrategy     optional strategy for aggregating the replies into a single result
     * @param parallelProcessing      whether to process in parallel; implied when an executor service is provided
     * @param executorService         optional executor service used for parallel processing
     * @param shutdownExecutorService whether the executor service should be shut down by this processor — TODO confirm in doStop
     * @param streaming               whether to aggregate replies in completion order instead of submission order
     * @param stopOnException         whether to stop multicasting when a sub exchange fails
     * @param timeout                 timeout in millis for parallel processing; 0 or negative means no timeout
     * @param onPrepare               optional processor used to prepare each exchange copy before processing
     * @param shareUnitOfWork         whether sub exchanges share the unit of work of the original exchange
     * @param parallelAggregate       whether aggregation tasks may run concurrently on the aggregate executor
     */
    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy,
                              boolean parallelProcessing, ExecutorService executorService, boolean shutdownExecutorService, boolean streaming,
                              boolean stopOnException, long timeout, Processor onPrepare, boolean shareUnitOfWork,
                              boolean parallelAggregate) {
        notNull(camelContext, "camelContext");
        this.camelContext = camelContext;
        this.processors = processors;
        this.aggregationStrategy = aggregationStrategy;
        this.executorService = executorService;
        this.shutdownExecutorService = shutdownExecutorService;
        this.streaming = streaming;
        this.stopOnException = stopOnException;
        // must enable parallel if executor service is provided
        this.parallelProcessing = parallelProcessing || executorService != null;
        this.timeout = timeout;
        this.onPrepare = onPrepare;
        this.shareUnitOfWork = shareUnitOfWork;
        this.parallelAggregate = parallelAggregate;
    }
195
196    @Override
197    public String toString() {
198        return "Multicast[" + getProcessors() + "]";
199    }
200
    /**
     * Returns the label used when tracing this processor.
     */
    public String getTraceLabel() {
        return "multicast";
    }
204
    /**
     * Returns the camel context this processor was created with.
     */
    public CamelContext getCamelContext() {
        return camelContext;
    }
208
    /**
     * Processes the exchange synchronously by bridging to the async variant
     * via {@link AsyncProcessorHelper}.
     */
    public void process(Exchange exchange) throws Exception {
        AsyncProcessorHelper.process(this, exchange);
    }
212
213    public boolean process(Exchange exchange, AsyncCallback callback) {
214        final AtomicExchange result = new AtomicExchange();
215        Iterable<ProcessorExchangePair> pairs = null;
216
217        try {
218            boolean sync = true;
219
220            pairs = createProcessorExchangePairs(exchange);
221
222            if (isParallelProcessing()) {
223                // ensure an executor is set when running in parallel
224                ObjectHelper.notNull(executorService, "executorService", this);
225                doProcessParallel(exchange, result, pairs, isStreaming(), callback);
226            } else {
227                sync = doProcessSequential(exchange, result, pairs, callback);
228            }
229
230            if (!sync) {
231                // the remainder of the multicast will be completed async
232                // so we break out now, then the callback will be invoked which then continue routing from where we left here
233                return false;
234            }
235        } catch (Throwable e) {
236            exchange.setException(e);
237            // unexpected exception was thrown, maybe from iterator etc. so do not regard as exhausted
238            // and do the done work
239            doDone(exchange, null, pairs, callback, true, false);
240            return true;
241        }
242
243        // multicasting was processed successfully
244        // and do the done work
245        Exchange subExchange = result.get() != null ? result.get() : null;
246        doDone(exchange, subExchange, pairs, callback, true, true);
247        return true;
248    }
249
    /**
     * Processes the pairs in parallel using the executor service, while a separate
     * aggregation task aggregates the replies on-the-fly.
     * <p/>
     * The caller thread blocks on a latch until the on-the-fly aggregation has completed,
     * so parallel processing is always synchronous from the caller's point of view
     * (the callback parameter is not used directly by this method).
     *
     * @param original  the original exchange
     * @param result    holder where the aggregated result is stored
     * @param pairs     the processor/exchange pairs to process
     * @param streaming whether to aggregate replies in completion order (<tt>true</tt>)
     *                  or in task submission order (<tt>false</tt>)
     * @param callback  the async callback (completion signaling is done by the caller)
     * @throws Exception if processing or aggregation failed
     */
    protected void doProcessParallel(final Exchange original, final AtomicExchange result, final Iterable<ProcessorExchangePair> pairs,
                                     final boolean streaming, final AsyncCallback callback) throws Exception {

        ObjectHelper.notNull(executorService, "ExecutorService", this);
        ObjectHelper.notNull(aggregateExecutorService, "AggregateExecutorService", this);

        final CompletionService<Exchange> completion;
        if (streaming) {
            // execute tasks in parallel+streaming and aggregate in the order they are finished (out of order sequence)
            completion = new ExecutorCompletionService<Exchange>(executorService);
        } else {
            // execute tasks in parallel and aggregate in the order the tasks are submitted (in order sequence)
            completion = new SubmitOrderedCompletionService<Exchange>(executorService);
        }

        // number of tasks submitted so far (shared with the aggregation task)
        final AtomicInteger total = new AtomicInteger(0);
        final Iterator<ProcessorExchangePair> it = pairs.iterator();

        if (it.hasNext()) {
            // when parallel then aggregate on the fly
            final AtomicBoolean running = new AtomicBoolean(true);
            final AtomicBoolean allTasksSubmitted = new AtomicBoolean();
            final CountDownLatch aggregationOnTheFlyDone = new CountDownLatch(1);
            final AtomicException executionException = new AtomicException();

            // issue task to execute in separate thread so it can aggregate on-the-fly
            // while we submit new tasks, and those tasks complete concurrently
            // this allows us to optimize work and reduce memory consumption
            final AggregateOnTheFlyTask aggregateOnTheFlyTask = new AggregateOnTheFlyTask(result, original, total, completion, running,
                    aggregationOnTheFlyDone, allTasksSubmitted, executionException);
            final AtomicBoolean aggregationTaskSubmitted = new AtomicBoolean();

            LOG.trace("Starting to submit parallel tasks");

            while (it.hasNext()) {
                final ProcessorExchangePair pair = it.next();
                final Exchange subExchange = pair.getExchange();
                updateNewExchange(subExchange, total.intValue(), pairs, it);

                completion.submit(new Callable<Exchange>() {
                    public Exchange call() throws Exception {
                        // only start the aggregation task when the first task is being executed, to avoid starting
                        // the aggregation task too early and piling up too many threads
                        if (aggregationTaskSubmitted.compareAndSet(false, true)) {
                            // but only submit the task once
                            aggregateExecutorService.submit(aggregateOnTheFlyTask);
                        }

                        if (!running.get()) {
                            // do not start processing the task if we are not running
                            // (eg stopOnException or a timeout has signaled to stop)
                            return subExchange;
                        }

                        try {
                            doProcessParallel(pair);
                        } catch (Throwable e) {
                            subExchange.setException(e);
                        }

                        // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                        Integer number = getExchangeIndex(subExchange);
                        boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, LOG);
                        if (stopOnException && !continueProcessing) {
                            // signal to stop running
                            running.set(false);
                            // throw caused exception
                            if (subExchange.getException() != null) {
                                // wrap in exception to explain where it failed
                                CamelExchangeException cause = new CamelExchangeException("Parallel processing failed for number " + number, subExchange, subExchange.getException());
                                subExchange.setException(cause);
                            }
                        }

                        LOG.trace("Parallel processing complete for exchange: {}", subExchange);
                        return subExchange;
                    }
                });

                total.incrementAndGet();
            }

            // signal that all tasks have been submitted
            LOG.trace("Signaling that all {} tasks has been submitted.", total.get());
            allTasksSubmitted.set(true);

            // it is too hard to do parallel async routing so we let the caller thread be synchronous
            // and have it pickup the replies and do the aggregation (eg we use a latch to wait)
            // wait for aggregation to be done
            LOG.debug("Waiting for on-the-fly aggregation to complete aggregating {} responses for exchangeId: {}", total.get(), original.getExchangeId());
            aggregationOnTheFlyDone.await();

            // did we fail for whatever reason, if so throw that caused exception
            if (executionException.get() != null) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Parallel processing failed due {}", executionException.get().getMessage());
                }
                throw executionException.get();
            }
        }

        // now everything is okay so we are done
        LOG.debug("Done parallel processing {} exchanges", total);
    }
353
354    /**
355     * Boss worker to control aggregate on-the-fly for completed tasks when using parallel processing.
356     * <p/>
357     * This ensures lower memory consumption as we do not need to keep all completed tasks in memory
358     * before we perform aggregation. Instead this separate thread will run and aggregate when new
359     * completed tasks is done.
360     * <p/>
361     * The logic is fairly complex as this implementation has to keep track how far it got, and also
362     * signal back to the <i>main</t> thread when its done, so the <i>main</t> thread can continue
363     * processing when the entire splitting is done.
364     */
    private final class AggregateOnTheFlyTask implements Runnable {

        // holder for the aggregated result exchange
        private final AtomicExchange result;
        // the original exchange being multicasted
        private final Exchange original;
        // total number of submitted tasks (grows while submission is still in progress)
        private final AtomicInteger total;
        // completion service the parallel tasks report their results to
        private final CompletionService<Exchange> completion;
        // flag used to signal the parallel tasks to stop
        private final AtomicBoolean running;
        // latch opened when this task is done, letting the main thread continue
        private final CountDownLatch aggregationOnTheFlyDone;
        // set to true once the main thread has submitted all tasks
        private final AtomicBoolean allTasksSubmitted;
        // holder for any exception thrown while aggregating, rethrown by the main thread
        private final AtomicException executionException;

        private AggregateOnTheFlyTask(AtomicExchange result, Exchange original, AtomicInteger total,
                                      CompletionService<Exchange> completion, AtomicBoolean running,
                                      CountDownLatch aggregationOnTheFlyDone, AtomicBoolean allTasksSubmitted,
                                      AtomicException executionException) {
            this.result = result;
            this.original = original;
            this.total = total;
            this.completion = completion;
            this.running = running;
            this.aggregationOnTheFlyDone = aggregationOnTheFlyDone;
            this.allTasksSubmitted = allTasksSubmitted;
            this.executionException = executionException;
        }

        public void run() {
            LOG.trace("Aggregate on the fly task started for exchangeId: {}", original.getExchangeId());

            try {
                aggregateOnTheFly();
            } catch (Throwable e) {
                // remember the exception so the waiting main thread can rethrow it
                if (e instanceof Exception) {
                    executionException.set((Exception) e);
                } else {
                    executionException.set(ObjectHelper.wrapRuntimeCamelException(e));
                }
            } finally {
                // must signal we are done so the latch can open and let the other thread continue processing
                LOG.debug("Signaling we are done aggregating on the fly for exchangeId: {}", original.getExchangeId());
                LOG.trace("Aggregate on the fly task done for exchangeId: {}", original.getExchangeId());
                aggregationOnTheFlyDone.countDown();
            }
        }

        /**
         * Polls completed tasks from the completion service and aggregates each reply
         * until all submitted tasks are aggregated, or processing stops early due to
         * a timeout or stopOnException.
         */
        private void aggregateOnTheFly() throws InterruptedException, ExecutionException {
            final AtomicBoolean timedOut = new AtomicBoolean();
            boolean stoppedOnException = false;
            final StopWatch watch = new StopWatch();
            final AtomicInteger aggregated = new AtomicInteger();
            boolean done = false;
            // not a for loop as on the fly may still run
            while (!done) {
                // check if we have already aggregated everything
                if (allTasksSubmitted.get() && aggregated.intValue() >= total.get()) {
                    LOG.debug("Done aggregating {} exchanges on the fly.", aggregated);
                    break;
                }

                Future<Exchange> future;
                if (timedOut.get()) {
                    // we are timed out but try to grab tasks that have already been completed
                    // poll will return null if no tasks is present
                    future = completion.poll();
                    LOG.trace("Polled completion task #{} after timeout to grab already completed tasks: {}", aggregated, future);
                } else if (timeout > 0) {
                    // poll only for the remainder of the configured timeout
                    long left = timeout - watch.taken();
                    if (left < 0) {
                        left = 0;
                    }
                    LOG.trace("Polling completion task #{} using timeout {} millis.", aggregated, left);
                    future = completion.poll(left, TimeUnit.MILLISECONDS);
                } else {
                    LOG.trace("Polling completion task #{}", aggregated);
                    // we must not block so poll every second
                    future = completion.poll(1, TimeUnit.SECONDS);
                    if (future == null) {
                        // and continue loop which will recheck if we are done
                        continue;
                    }
                }

                if (future == null) {
                    // a timeout occurred; let the timeout task notify the aggregation strategy and mark us timed out
                    ParallelAggregateTimeoutTask task = new ParallelAggregateTimeoutTask(original, result, completion, aggregated, total, timedOut);
                    if (parallelAggregate) {
                        aggregateExecutorService.submit(task);
                    } else {
                        // in non parallel mode then just run the task
                        task.run();
                    }
                } else {
                    // there is a result to aggregate
                    Exchange subExchange = future.get();

                    // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                    Integer number = getExchangeIndex(subExchange);
                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, LOG);
                    if (stopOnException && !continueProcessing) {
                        // we want to stop on exception and an exception or failure occurred
                        // this is similar to what the pipeline does, so we should do the same to not surprise end users
                        // so we should set the failed exchange as the result and break out
                        result.set(subExchange);
                        stoppedOnException = true;
                        break;
                    }

                    // we got a result so aggregate it
                    ParallelAggregateTask task = new ParallelAggregateTask(result, subExchange, aggregated);
                    if (parallelAggregate) {
                        aggregateExecutorService.submit(task);
                    } else {
                        // in non parallel mode then just run the task
                        task.run();
                    }
                }
            }

            if (timedOut.get() || stoppedOnException) {
                if (timedOut.get()) {
                    LOG.debug("Cancelling tasks due timeout after {} millis.", timeout);
                }
                if (stoppedOnException) {
                    LOG.debug("Cancelling tasks due stopOnException.");
                }
                // cancel tasks as we timed out (its safe to cancel done tasks)
                running.set(false);
            }
        }
    }
493
494    /**
495     * Worker task to aggregate the old and new exchange on-the-fly for completed tasks when using parallel processing.
496     */
497    private final class ParallelAggregateTask implements Runnable {
498
499        private final AtomicExchange result;
500        private final Exchange subExchange;
501        private final AtomicInteger aggregated;
502
503        private ParallelAggregateTask(AtomicExchange result, Exchange subExchange, AtomicInteger aggregated) {
504            this.result = result;
505            this.subExchange = subExchange;
506            this.aggregated = aggregated;
507        }
508
509        @Override
510        public void run() {
511            if (parallelAggregate) {
512                doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
513            } else {
514                doAggregate(getAggregationStrategy(subExchange), result, subExchange);
515            }
516            aggregated.incrementAndGet();
517        }
518    }
519
520    /**
521     * Worker task to aggregate the old and new exchange on-the-fly for completed tasks when using parallel processing.
522     */
    private final class ParallelAggregateTimeoutTask implements Runnable {

        // the original exchange being multicasted
        private final Exchange original;
        // holder for the aggregated result so far
        private final AtomicExchange result;
        // completion service used to mark the timed out task in submit order mode
        private final CompletionService<Exchange> completion;
        // counter of how many exchanges have been aggregated so far
        private final AtomicInteger aggregated;
        // total number of submitted tasks
        private final AtomicInteger total;
        // flag set when a timeout has occurred
        private final AtomicBoolean timedOut;

        private ParallelAggregateTimeoutTask(Exchange original, AtomicExchange result, CompletionService<Exchange> completion,
                                             AtomicInteger aggregated, AtomicInteger total, AtomicBoolean timedOut) {
            this.original = original;
            this.result = result;
            this.completion = completion;
            this.aggregated = aggregated;
            this.total = total;
            this.timedOut = timedOut;
        }

        @Override
        public void run() {
            AggregationStrategy strategy = getAggregationStrategy(null);
            if (strategy instanceof TimeoutAwareAggregationStrategy) {
                // notify the strategy we timed out
                Exchange oldExchange = result.get();
                if (oldExchange == null) {
                    // if they all timed out the result may not have been set yet, so use the original exchange
                    oldExchange = original;
                }
                ((TimeoutAwareAggregationStrategy) strategy).timeout(oldExchange, aggregated.intValue(), total.intValue(), timeout);
            } else {
                // log a WARN we timed out since it will not be aggregated and the Exchange will be lost
                LOG.warn("Parallel processing timed out after {} millis for number {}. This task will be cancelled and will not be aggregated.", timeout, aggregated.intValue());
            }
            LOG.debug("Timeout occurred after {} millis for number {} task.", timeout, aggregated.intValue());
            timedOut.set(true);

            // mark that index as timed out, which allows us to try to retrieve
            // any already completed tasks in the next loop
            if (completion instanceof SubmitOrderedCompletionService) {
                ((SubmitOrderedCompletionService<?>) completion).timeoutTask();
            }

            // we timed out so increment the counter (so the aggregation loop can still complete)
            aggregated.incrementAndGet();
        }
    }
570
    /**
     * Processes each pair one by one in the caller thread, aggregating the reply of each
     * pair before continuing with the next.
     *
     * @param original the original exchange
     * @param result   holder where the aggregated result is stored
     * @param pairs    the processor/exchange pairs to process
     * @param callback the async callback used when a pair continues asynchronously
     * @return <tt>true</tt> if all pairs completed synchronously, <tt>false</tt> if processing
     *         continues asynchronously via the callback
     * @throws Exception if processing failed
     */
    protected boolean doProcessSequential(Exchange original, AtomicExchange result, Iterable<ProcessorExchangePair> pairs, AsyncCallback callback) throws Exception {
        AtomicInteger total = new AtomicInteger();
        Iterator<ProcessorExchangePair> it = pairs.iterator();

        while (it.hasNext()) {
            ProcessorExchangePair pair = it.next();
            Exchange subExchange = pair.getExchange();
            updateNewExchange(subExchange, total.get(), pairs, it);

            boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);
            if (!sync) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", pair.getExchange().getExchangeId());
                }
                // the remainder of the multicast will be completed async
                // so we break out now, then the callback will be invoked which then continue routing from where we left here
                return false;
            }

            if (LOG.isTraceEnabled()) {
                LOG.trace("Processing exchangeId: {} is continued being processed synchronously", pair.getExchange().getExchangeId());
            }

            // Decide whether to continue with the multicast or not; similar logic to the Pipeline
            // remember to test for stop on exception and aggregate before copying back results
            boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
            if (stopOnException && !continueProcessing) {
                if (subExchange.getException() != null) {
                    // wrap in exception to explain where it failed
                    CamelExchangeException cause = new CamelExchangeException("Sequential processing failed for number " + total.get(), subExchange, subExchange.getException());
                    subExchange.setException(cause);
                }
                // we want to stop on exception, and the exception was handled by the error handler
                // this is similar to what the pipeline does, so we should do the same to not surprise end users
                // so we should set the failed exchange as the result and be done
                result.set(subExchange);
                return true;
            }

            LOG.trace("Sequential processing complete for number {} exchange: {}", total, subExchange);

            // aggregate the reply from this pair into the result before moving on
            if (parallelAggregate) {
                doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
            } else {
                doAggregate(getAggregationStrategy(subExchange), result, subExchange);
            }
            
            total.incrementAndGet();
        }

        LOG.debug("Done sequential processing {} exchanges", total);

        return true;
    }
625
    /**
     * Processes the given pair in sequential mode. If the processor completes asynchronously,
     * the remaining pairs are driven from within the async callback instead of the caller's loop.
     *
     * @param original the original exchange (passed to the done work and callback)
     * @param result   holder for the aggregated result exchange
     * @param pairs    all the pairs (closed by {@link #doDone} when processing finishes)
     * @param it       iterator positioned after the current pair
     * @param pair     the current pair to process
     * @param callback the callback to invoke when all processing is done
     * @param total    counter of exchanges processed (and aggregated) so far
     * @return <tt>true</tt> if the pair was processed synchronously, <tt>false</tt> if processing
     *         continues asynchronously and the inner callback takes over
     */
    private boolean doProcessSequential(final Exchange original, final AtomicExchange result,
                                        final Iterable<ProcessorExchangePair> pairs, final Iterator<ProcessorExchangePair> it,
                                        final ProcessorExchangePair pair, final AsyncCallback callback, final AtomicInteger total) {
        boolean sync = true;

        final Exchange exchange = pair.getExchange();
        Processor processor = pair.getProcessor();
        final Producer producer = pair.getProducer();

        TracedRouteNodes traced = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getTracedRouteNodes() : null;

        // compute time taken if sending to another endpoint
        final StopWatch watch = producer != null ? new StopWatch() : null;

        try {
            // prepare tracing starting from a new block
            if (traced != null) {
                traced.pushBlock();
            }

            if (producer != null) {
                EventHelper.notifyExchangeSending(exchange.getContext(), exchange, producer.getEndpoint());
            }
            // let the prepared process it, remember to begin the exchange pair
            AsyncProcessor async = AsyncProcessorConverterHelper.convert(processor);
            pair.begin();
            sync = async.process(exchange, new AsyncCallback() {
                public void done(boolean doneSync) {
                    // we are done with the exchange pair
                    pair.done();

                    // okay we are done, so notify the exchange was sent
                    if (producer != null) {
                        // watch is always non-null when producer is non-null (see creation above)
                        long timeTaken = watch.stop();
                        Endpoint endpoint = producer.getEndpoint();
                        // emit event that the exchange was sent to the endpoint
                        EventHelper.notifyExchangeSent(exchange.getContext(), exchange, endpoint, timeTaken);
                    }

                    // we only have to handle async completion of the routing slip
                    if (doneSync) {
                        return;
                    }

                    // continue processing the multicast asynchronously
                    Exchange subExchange = exchange;

                    // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                    // remember to test for stop on exception and aggregate before copying back results
                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
                    if (stopOnException && !continueProcessing) {
                        if (subExchange.getException() != null) {
                            // wrap in exception to explain where it failed
                            subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, subExchange.getException()));
                        } else {
                            // we want to stop on exception, and the exception was handled by the error handler
                            // this is similar to what the pipeline does, so we should do the same to not surprise end users
                            // so we should set the failed exchange as the result and be done
                            result.set(subExchange);
                        }
                        // and do the done work
                        doDone(original, subExchange, pairs, callback, false, true);
                        return;
                    }

                    // must catch any exceptions from aggregation
                    try {
                        if (parallelAggregate) {
                            doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
                        } else {
                            doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                        }
                    } catch (Throwable e) {
                        // wrap in exception to explain where it failed
                        subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, e));
                        // and do the done work
                        doDone(original, subExchange, pairs, callback, false, true);
                        return;
                    }

                    total.incrementAndGet();

                    // maybe there are more processors to multicast
                    while (it.hasNext()) {

                        // prepare and run the next
                        ProcessorExchangePair pair = it.next();
                        subExchange = pair.getExchange();
                        updateNewExchange(subExchange, total.get(), pairs, it);
                        // recursive call; if the next pair turns async as well, its own
                        // callback instance continues from there and we bail out here
                        boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);

                        if (!sync) {
                            LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", original.getExchangeId());
                            return;
                        }

                        // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                        // remember to test for stop on exception and aggregate before copying back results
                        continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
                        if (stopOnException && !continueProcessing) {
                            if (subExchange.getException() != null) {
                                // wrap in exception to explain where it failed
                                subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, subExchange.getException()));
                            } else {
                                // we want to stop on exception, and the exception was handled by the error handler
                                // this is similar to what the pipeline does, so we should do the same to not surprise end users
                                // so we should set the failed exchange as the result and be done
                                result.set(subExchange);
                            }
                            // and do the done work
                            doDone(original, subExchange, pairs, callback, false, true);
                            return;
                        }

                        // must catch any exceptions from aggregation
                        try {
                            if (parallelAggregate) {
                                doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
                            } else {
                                doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                            }
                        } catch (Throwable e) {
                            // wrap in exception to explain where it failed
                            subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, e));
                            // and do the done work
                            doDone(original, subExchange, pairs, callback, false, true);
                            return;
                        }

                        total.incrementAndGet();
                    }

                    // do the done work
                    // NOTE(review): the ternary below is equivalent to plain result.get()
                    subExchange = result.get() != null ? result.get() : null;
                    doDone(original, subExchange, pairs, callback, false, true);
                }
            });
        } finally {
            // pop the block so by next round we have the same starting point and thus the tracing looks accurate
            if (traced != null) {
                traced.popBlock();
            }
        }

        return sync;
    }
771
772    private void doProcessParallel(final ProcessorExchangePair pair) throws Exception {
773        final Exchange exchange = pair.getExchange();
774        Processor processor = pair.getProcessor();
775        Producer producer = pair.getProducer();
776
777        TracedRouteNodes traced = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getTracedRouteNodes() : null;
778
779        // compute time taken if sending to another endpoint
780        StopWatch watch = null;
781        if (producer != null) {
782            watch = new StopWatch();
783        }
784
785        try {
786            // prepare tracing starting from a new block
787            if (traced != null) {
788                traced.pushBlock();
789            }
790
791            if (producer != null) {
792                EventHelper.notifyExchangeSending(exchange.getContext(), exchange, producer.getEndpoint());
793            }
794            // let the prepared process it, remember to begin the exchange pair
795            AsyncProcessor async = AsyncProcessorConverterHelper.convert(processor);
796            pair.begin();
797            // we invoke it synchronously as parallel async routing is too hard
798            AsyncProcessorHelper.process(async, exchange);
799        } finally {
800            pair.done();
801            // pop the block so by next round we have the same staring point and thus the tracing looks accurate
802            if (traced != null) {
803                traced.popBlock();
804            }
805            if (producer != null) {
806                long timeTaken = watch.stop();
807                Endpoint endpoint = producer.getEndpoint();
808                // emit event that the exchange was sent to the endpoint
809                // this is okay to do here in the finally block, as the processing is not using the async routing engine
810                //( we invoke it synchronously as parallel async routing is too hard)
811                EventHelper.notifyExchangeSent(exchange.getContext(), exchange, endpoint, timeTaken);
812            }
813        }
814    }
815
816    /**
817     * Common work which must be done when we are done multicasting.
818     * <p/>
819     * This logic applies for both running synchronous and asynchronous as there are multiple exist points
820     * when using the asynchronous routing engine. And therefore we want the logic in one method instead
821     * of being scattered.
822     *
823     * @param original     the original exchange
824     * @param subExchange  the current sub exchange, can be <tt>null</tt> for the synchronous part
825     * @param pairs        the pairs with the exchanges to process
826     * @param callback     the callback
827     * @param doneSync     the <tt>doneSync</tt> parameter to call on callback
828     * @param forceExhaust whether or not error handling is exhausted
829     */
830    protected void doDone(Exchange original, Exchange subExchange, final Iterable<ProcessorExchangePair> pairs,
831                          AsyncCallback callback, boolean doneSync, boolean forceExhaust) {
832
833        // we are done so close the pairs iterator
834        if (pairs != null && pairs instanceof Closeable) {
835            IOHelper.close((Closeable) pairs, "pairs", LOG);
836        }
837
838        AggregationStrategy strategy = getAggregationStrategy(subExchange);
839        // invoke the on completion callback
840        if (strategy instanceof CompletionAwareAggregationStrategy) {
841            ((CompletionAwareAggregationStrategy) strategy).onCompletion(subExchange);
842        }
843
844        // cleanup any per exchange aggregation strategy
845        removeAggregationStrategyFromExchange(original);
846
847        // we need to know if there was an exception, and if the stopOnException option was enabled
848        // also we would need to know if any error handler has attempted redelivery and exhausted
849        boolean stoppedOnException = false;
850        boolean exception = false;
851        boolean exhaust = forceExhaust || subExchange != null && (subExchange.getException() != null || ExchangeHelper.isRedeliveryExhausted(subExchange));
852        if (original.getException() != null || subExchange != null && subExchange.getException() != null) {
853            // there was an exception and we stopped
854            stoppedOnException = isStopOnException();
855            exception = true;
856        }
857
858        // must copy results at this point
859        if (subExchange != null) {
860            if (stoppedOnException) {
861                // if we stopped due an exception then only propagte the exception
862                original.setException(subExchange.getException());
863            } else {
864                // copy the current result to original so it will contain this result of this eip
865                ExchangeHelper.copyResults(original, subExchange);
866            }
867        }
868
869        // .. and then if there was an exception we need to configure the redelivery exhaust
870        // for example the noErrorHandler will not cause redelivery exhaust so if this error
871        // handled has been in use, then the exhaust would be false (if not forced)
872        if (exception) {
873            // multicast uses error handling on its output processors and they have tried to redeliver
874            // so we shall signal back to the other error handlers that we are exhausted and they should not
875            // also try to redeliver as we will then do that twice
876            original.setProperty(Exchange.REDELIVERY_EXHAUSTED, exhaust);
877        }
878
879        callback.done(doneSync);
880    }
881
882    /**
883     * Aggregate the {@link Exchange} with the current result.
884     * This method is synchronized and is called directly when parallelAggregate is disabled (by default).
885     *
886     * @param strategy the aggregation strategy to use
887     * @param result   the current result
888     * @param exchange the exchange to be added to the result
889     * @see #doAggregateInternal(org.apache.camel.processor.aggregate.AggregationStrategy, org.apache.camel.util.concurrent.AtomicExchange, org.apache.camel.Exchange)
890     */
891    protected synchronized void doAggregate(AggregationStrategy strategy, AtomicExchange result, Exchange exchange) {
892        doAggregateInternal(strategy, result, exchange);
893    }
894
895    /**
896     * Aggregate the {@link Exchange} with the current result.
897     * This method is unsynchronized and is called directly when parallelAggregate is enabled.
898     * In all other cases, this method is called from the doAggregate which is a synchronized method
899     *
900     * @param strategy the aggregation strategy to use
901     * @param result   the current result
902     * @param exchange the exchange to be added to the result
903     * @see #doAggregate(org.apache.camel.processor.aggregate.AggregationStrategy, org.apache.camel.util.concurrent.AtomicExchange, org.apache.camel.Exchange)
904     */
905    protected void doAggregateInternal(AggregationStrategy strategy, AtomicExchange result, Exchange exchange) {
906        if (strategy != null) {
907            // prepare the exchanges for aggregation
908            Exchange oldExchange = result.get();
909            ExchangeHelper.prepareAggregation(oldExchange, exchange);
910            result.set(strategy.aggregate(oldExchange, exchange));
911        }
912    }
913
914    protected void updateNewExchange(Exchange exchange, int index, Iterable<ProcessorExchangePair> allPairs,
915                                     Iterator<ProcessorExchangePair> it) {
916        exchange.setProperty(Exchange.MULTICAST_INDEX, index);
917        if (it.hasNext()) {
918            exchange.setProperty(Exchange.MULTICAST_COMPLETE, Boolean.FALSE);
919        } else {
920            exchange.setProperty(Exchange.MULTICAST_COMPLETE, Boolean.TRUE);
921        }
922    }
923
    /**
     * Returns the multicast index previously stored on the exchange by
     * {@link #updateNewExchange}, or <tt>null</tt> if not set.
     *
     * @param exchange the exchange
     * @return the index, or <tt>null</tt> if none stored
     */
    protected Integer getExchangeIndex(Exchange exchange) {
        return exchange.getProperty(Exchange.MULTICAST_INDEX, Integer.class);
    }
927
928    protected Iterable<ProcessorExchangePair> createProcessorExchangePairs(Exchange exchange) throws Exception {
929        List<ProcessorExchangePair> result = new ArrayList<ProcessorExchangePair>(processors.size());
930
931        StreamCache streamCache = null;
932        if (isParallelProcessing() && exchange.getIn().getBody() instanceof StreamCache) {
933            // in parallel processing case, the stream must be copied, therefore get the stream
934            streamCache = (StreamCache) exchange.getIn().getBody();
935        }
936
937        int index = 0;
938        for (Processor processor : processors) {
939            // copy exchange, and do not share the unit of work
940            Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);
941
942            if (streamCache != null) {
943                if (index > 0) {
944                    // copy it otherwise parallel processing is not possible,
945                    // because streams can only be read once
946                    StreamCache copiedStreamCache = streamCache.copy();
947                    if (copiedStreamCache != null) {
948                        copy.getIn().setBody(copiedStreamCache);  
949                    }
950                }
951            }
952
953            // If the multi-cast processor has an aggregation strategy
954            // then the StreamCache created by the child routes must not be 
955            // closed by the unit of work of the child route, but by the unit of 
956            // work of the parent route or grand parent route or grand grand parent route ...(in case of nesting).
957            // Set therefore the unit of work of the  parent route as stream cache unit of work, 
958            // if it is not already set.
959            if (copy.getProperty(Exchange.STREAM_CACHE_UNIT_OF_WORK) == null) {
960                copy.setProperty(Exchange.STREAM_CACHE_UNIT_OF_WORK, exchange.getUnitOfWork());
961            }
962            // if we share unit of work, we need to prepare the child exchange
963            if (isShareUnitOfWork()) {
964                prepareSharedUnitOfWork(copy, exchange);
965            }
966
967            // and add the pair
968            RouteContext routeContext = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getRouteContext() : null;
969            result.add(createProcessorExchangePair(index++, processor, copy, routeContext));
970        }
971
972        if (exchange.getException() != null) {
973            // force any exceptions occurred during creation of exchange paris to be thrown
974            // before returning the answer;
975            throw exchange.getException();
976        }
977
978        return result;
979    }
980
981    /**
982     * Creates the {@link ProcessorExchangePair} which holds the processor and exchange to be send out.
983     * <p/>
984     * You <b>must</b> use this method to create the instances of {@link ProcessorExchangePair} as they
985     * need to be specially prepared before use.
986     *
987     * @param index        the index
988     * @param processor    the processor
989     * @param exchange     the exchange
990     * @param routeContext the route context
991     * @return prepared for use
992     */
993    protected ProcessorExchangePair createProcessorExchangePair(int index, Processor processor, Exchange exchange,
994                                                                RouteContext routeContext) {
995        Processor prepared = processor;
996
997        // set property which endpoint we send to
998        setToEndpoint(exchange, prepared);
999
1000        // rework error handling to support fine grained error handling
1001        prepared = createErrorHandler(routeContext, exchange, prepared);
1002
1003        // invoke on prepare on the exchange if specified
1004        if (onPrepare != null) {
1005            try {
1006                onPrepare.process(exchange);
1007            } catch (Exception e) {
1008                exchange.setException(e);
1009            }
1010        }
1011        return new DefaultProcessorExchangePair(index, processor, prepared, exchange);
1012    }
1013
    /**
     * Wraps the given processor in an error handler (and unit of work processor) so each multicast
     * output has fine grained error handling, allowing redelivery on that output alone instead of
     * re-running the entire multicast block. Created handlers are cached per (routeContext, processor)
     * key and reused, except when a child unit of work is in play.
     *
     * @param routeContext the route context, may be <tt>null</tt> (no error handler is added then)
     * @param exchange     the exchange being prepared
     * @param processor    the processor to wrap
     * @return the wrapped processor
     */
    protected Processor createErrorHandler(RouteContext routeContext, Exchange exchange, Processor processor) {
        Processor answer;

        boolean tryBlock = exchange.getProperty(Exchange.TRY_ROUTE_BLOCK, false, boolean.class);

        // do not wrap in error handler if we are inside a try block
        if (!tryBlock && routeContext != null) {
            // wrap the producer in error handler so we have fine grained error handling on
            // the output side instead of the input side
            // this is needed to support redelivery on that output alone and not doing redelivery
            // for the entire multicast block again which will start from scratch again

            // create key for cache
            final PreparedErrorHandler key = new PreparedErrorHandler(routeContext, processor);

            // lookup cached first to reuse and preserve memory
            answer = errorHandlers.get(key);
            if (answer != null) {
                LOG.trace("Using existing error handler for: {}", processor);
                return answer;
            }

            LOG.trace("Creating error handler for: {}", processor);
            ErrorHandlerFactory builder = routeContext.getRoute().getErrorHandlerBuilder();
            // create error handler (create error handler directly to keep it light weight,
            // instead of using ProcessorDefinition.wrapInErrorHandler)
            try {
                processor = builder.createErrorHandler(routeContext, processor);

                // and wrap in unit of work processor so the copy exchange also can run under UoW
                answer = createUnitOfWorkProcessor(routeContext, processor, exchange);

                boolean child = exchange.getProperty(Exchange.PARENT_UNIT_OF_WORK, UnitOfWork.class) != null;

                // must start the error handler
                ServiceHelper.startServices(answer);

                // here we don't cache the child unit of work
                if (!child) {
                    // add to cache
                    // (putIfAbsent as a concurrent call may have cached one in the meantime)
                    errorHandlers.putIfAbsent(key, answer);
                }

            } catch (Exception e) {
                throw ObjectHelper.wrapRuntimeCamelException(e);
            }
        } else {
            // and wrap in unit of work processor so the copy exchange also can run under UoW
            answer = createUnitOfWorkProcessor(routeContext, processor, exchange);
        }

        return answer;
    }
1067
1068    /**
1069     * Strategy to create the unit of work to be used for the sub route
1070     *
1071     * @param routeContext the route context
1072     * @param processor    the processor
1073     * @param exchange     the exchange
1074     * @return the unit of work processor
1075     */
1076    protected Processor createUnitOfWorkProcessor(RouteContext routeContext, Processor processor, Exchange exchange) {
1077        String routeId = routeContext != null ? routeContext.getRoute().idOrCreate(routeContext.getCamelContext().getNodeIdFactory()) : null;
1078        CamelInternalProcessor internal = new CamelInternalProcessor(processor);
1079
1080        // and wrap it in a unit of work so the UoW is on the top, so the entire route will be in the same UoW
1081        UnitOfWork parent = exchange.getProperty(Exchange.PARENT_UNIT_OF_WORK, UnitOfWork.class);
1082        if (parent != null) {
1083            internal.addAdvice(new CamelInternalProcessor.ChildUnitOfWorkProcessorAdvice(routeId, parent));
1084        } else {
1085            internal.addAdvice(new CamelInternalProcessor.UnitOfWorkProcessorAdvice(routeId));
1086        }
1087
1088        // and then in route context so we can keep track which route this is at runtime
1089        if (routeContext != null) {
1090            internal.addAdvice(new CamelInternalProcessor.RouteContextAdvice(routeContext));
1091        }
1092        return internal;
1093    }
1094
1095    /**
1096     * Prepares the exchange for participating in a shared unit of work
1097     * <p/>
1098     * This ensures a child exchange can access its parent {@link UnitOfWork} when it participate
1099     * in a shared unit of work.
1100     *
1101     * @param childExchange  the child exchange
1102     * @param parentExchange the parent exchange
1103     */
1104    protected void prepareSharedUnitOfWork(Exchange childExchange, Exchange parentExchange) {
1105        childExchange.setProperty(Exchange.PARENT_UNIT_OF_WORK, parentExchange.getUnitOfWork());
1106    }
1107
    /**
     * Validates configuration and starts the aggregation strategy and processors.
     *
     * @throws IllegalArgumentException if parallel processing is enabled without an executor
     *         service, or a timeout is configured without parallel processing
     * @throws Exception if starting the services fails
     */
    protected void doStart() throws Exception {
        // parallel processing requires an executor service to run the tasks on
        if (isParallelProcessing() && executorService == null) {
            throw new IllegalArgumentException("ParallelProcessing is enabled but ExecutorService has not been set");
        }
        // timeout is only supported when running in parallel mode
        if (timeout > 0 && !isParallelProcessing()) {
            throw new IllegalArgumentException("Timeout is used but ParallelProcessing has not been enabled");
        }
        if (isParallelProcessing() && aggregateExecutorService == null) {
            // use unbounded thread pool so we ensure the aggregate on-the-fly task always will have assigned a thread
            // and run the tasks when the task is submitted. If not then the aggregate task may not be able to run
            // and signal completion during processing, which would lead to what would appear as a dead-lock or a slow processing
            String name = getClass().getSimpleName() + "-AggregateTask";
            aggregateExecutorService = createAggregateExecutorService(name);
        }
        ServiceHelper.startServices(aggregationStrategy, processors);
    }
1124
1125    /**
1126     * Strategy to create the thread pool for the aggregator background task which waits for and aggregates
1127     * completed tasks when running in parallel mode.
1128     *
1129     * @param name  the suggested name for the background thread
1130     * @return the thread pool
1131     */
1132    protected synchronized ExecutorService createAggregateExecutorService(String name) {
1133        // use a cached thread pool so we each on-the-fly task has a dedicated thread to process completions as they come in
1134        return camelContext.getExecutorServiceManager().newCachedThreadPool(this, name);
1135    }
1136
    @Override
    protected void doStop() throws Exception {
        // stop processors first, then the cached error handlers and the aggregation strategy
        ServiceHelper.stopServices(processors, errorHandlers, aggregationStrategy);
    }
1141
    @Override
    protected void doShutdown() throws Exception {
        ServiceHelper.stopAndShutdownServices(processors, errorHandlers, aggregationStrategy);
        // only clear error handlers when shutting down
        errorHandlers.clear();

        // shut down the main executor service only when we own its lifecycle
        if (shutdownExecutorService && executorService != null) {
            getCamelContext().getExecutorServiceManager().shutdownNow(executorService);
        }
        // the aggregate executor service is always created by us (see doStart), so always shut it down
        if (aggregateExecutorService != null) {
            getCamelContext().getExecutorServiceManager().shutdownNow(aggregateExecutorService);
        }
    }
1155
1156    protected static void setToEndpoint(Exchange exchange, Processor processor) {
1157        if (processor instanceof Producer) {
1158            Producer producer = (Producer) processor;
1159            exchange.setProperty(Exchange.TO_ENDPOINT, producer.getEndpoint().getEndpointUri());
1160        }
1161    }
1162
1163    protected AggregationStrategy getAggregationStrategy(Exchange exchange) {
1164        AggregationStrategy answer = null;
1165
1166        // prefer to use per Exchange aggregation strategy over a global strategy
1167        if (exchange != null) {
1168            Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
1169            Map<Object, AggregationStrategy> map = CastUtils.cast(property);
1170            if (map != null) {
1171                answer = map.get(this);
1172            }
1173        }
1174        if (answer == null) {
1175            // fallback to global strategy
1176            answer = getAggregationStrategy();
1177        }
1178        return answer;
1179    }
1180
1181    /**
1182     * Sets the given {@link org.apache.camel.processor.aggregate.AggregationStrategy} on the {@link Exchange}.
1183     *
1184     * @param exchange            the exchange
1185     * @param aggregationStrategy the strategy
1186     */
1187    protected void setAggregationStrategyOnExchange(Exchange exchange, AggregationStrategy aggregationStrategy) {
1188        Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
1189        Map<Object, AggregationStrategy> map = CastUtils.cast(property);
1190        if (map == null) {
1191            map = new ConcurrentHashMap<Object, AggregationStrategy>();
1192        } else {
1193            // it is not safe to use the map directly as the exchange doesn't have the deep copy of it's properties
1194            // we just create a new copy if we need to change the map
1195            map = new ConcurrentHashMap<Object, AggregationStrategy>(map);
1196        }
1197        // store the strategy using this processor as the key
1198        // (so we can store multiple strategies on the same exchange)
1199        map.put(this, aggregationStrategy);
1200        exchange.setProperty(Exchange.AGGREGATION_STRATEGY, map);
1201    }
1202
1203    /**
1204     * Removes the associated {@link org.apache.camel.processor.aggregate.AggregationStrategy} from the {@link Exchange}
1205     * which must be done after use.
1206     *
1207     * @param exchange the current exchange
1208     */
1209    protected void removeAggregationStrategyFromExchange(Exchange exchange) {
1210        Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
1211        Map<Object, AggregationStrategy> map = CastUtils.cast(property);
1212        if (map == null) {
1213            return;
1214        }
1215        // remove the strategy using this processor as the key
1216        map.remove(this);
1217    }
1218
1219    /**
1220     * Is the multicast processor working in streaming mode?
1221     * <p/>
1222     * In streaming mode:
1223     * <ul>
1224     * <li>we use {@link Iterable} to ensure we can send messages as soon as the data becomes available</li>
1225     * <li>for parallel processing, we start aggregating responses as they get send back to the processor;
1226     * this means the {@link org.apache.camel.processor.aggregate.AggregationStrategy} has to take care of handling out-of-order arrival of exchanges</li>
1227     * </ul>
1228     */
1229    public boolean isStreaming() {
1230        return streaming;
1231    }
1232
1233    /**
1234     * Should the multicast processor stop processing further exchanges in case of an exception occurred?
1235     */
1236    public boolean isStopOnException() {
1237        return stopOnException;
1238    }
1239
1240    /**
1241     * Returns the producers to multicast to
1242     */
1243    public Collection<Processor> getProcessors() {
1244        return processors;
1245    }
1246
1247    /**
1248     * An optional timeout in millis when using parallel processing
1249     */
1250    public long getTimeout() {
1251        return timeout;
1252    }
1253
1254    /**
1255     * Use {@link #getAggregationStrategy(org.apache.camel.Exchange)} instead.
1256     */
1257    public AggregationStrategy getAggregationStrategy() {
1258        return aggregationStrategy;
1259    }
1260
    /**
     * Whether the outputs are processed in parallel (using the configured executor service).
     */
    public boolean isParallelProcessing() {
        return parallelProcessing;
    }
1264
    /**
     * Whether the sub exchanges share the unit of work with the parent exchange.
     */
    public boolean isShareUnitOfWork() {
        return shareUnitOfWork;
    }
1268
1269    public List<Processor> next() {
1270        if (!hasNext()) {
1271            return null;
1272        }
1273        return new ArrayList<Processor>(processors);
1274    }
1275
    /**
     * Whether there are any next processors to navigate to.
     */
    public boolean hasNext() {
        return processors != null && !processors.isEmpty();
    }
1279}