001 /**
002 * Licensed to the Apache Software Foundation (ASF) under one or more
003 * contributor license agreements. See the NOTICE file distributed with
004 * this work for additional information regarding copyright ownership.
005 * The ASF licenses this file to You under the Apache License, Version 2.0
006 * (the "License"); you may not use this file except in compliance with
007 * the License. You may obtain a copy of the License at
008 *
009 * http://www.apache.org/licenses/LICENSE-2.0
010 *
011 * Unless required by applicable law or agreed to in writing, software
012 * distributed under the License is distributed on an "AS IS" BASIS,
013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
014 * See the License for the specific language governing permissions and
015 * limitations under the License.
016 */
017 package org.apache.camel.processor;
018
019 import java.util.ArrayList;
020 import java.util.Collection;
021 import java.util.HashMap;
022 import java.util.Iterator;
023 import java.util.List;
024 import java.util.Map;
025 import java.util.concurrent.Callable;
026 import java.util.concurrent.CompletionService;
027 import java.util.concurrent.ConcurrentHashMap;
028 import java.util.concurrent.ConcurrentMap;
029 import java.util.concurrent.CountDownLatch;
030 import java.util.concurrent.ExecutionException;
031 import java.util.concurrent.ExecutorCompletionService;
032 import java.util.concurrent.ExecutorService;
033 import java.util.concurrent.Future;
034 import java.util.concurrent.TimeUnit;
035 import java.util.concurrent.atomic.AtomicBoolean;
036 import java.util.concurrent.atomic.AtomicInteger;
037
038 import org.apache.camel.AsyncCallback;
039 import org.apache.camel.AsyncProcessor;
040 import org.apache.camel.CamelContext;
041 import org.apache.camel.CamelExchangeException;
042 import org.apache.camel.Endpoint;
043 import org.apache.camel.ErrorHandlerFactory;
044 import org.apache.camel.Exchange;
045 import org.apache.camel.Navigate;
046 import org.apache.camel.Processor;
047 import org.apache.camel.Producer;
048 import org.apache.camel.Traceable;
049 import org.apache.camel.processor.aggregate.AggregationStrategy;
050 import org.apache.camel.processor.aggregate.TimeoutAwareAggregationStrategy;
051 import org.apache.camel.spi.RouteContext;
052 import org.apache.camel.spi.TracedRouteNodes;
053 import org.apache.camel.spi.UnitOfWork;
054 import org.apache.camel.support.ServiceSupport;
055 import org.apache.camel.util.AsyncProcessorConverterHelper;
056 import org.apache.camel.util.AsyncProcessorHelper;
057 import org.apache.camel.util.CastUtils;
058 import org.apache.camel.util.EventHelper;
059 import org.apache.camel.util.ExchangeHelper;
060 import org.apache.camel.util.KeyValueHolder;
061 import org.apache.camel.util.ObjectHelper;
062 import org.apache.camel.util.ServiceHelper;
063 import org.apache.camel.util.StopWatch;
064 import org.apache.camel.util.concurrent.AtomicException;
065 import org.apache.camel.util.concurrent.AtomicExchange;
066 import org.apache.camel.util.concurrent.SubmitOrderedCompletionService;
067 import org.slf4j.Logger;
068 import org.slf4j.LoggerFactory;
069
070 import static org.apache.camel.util.ObjectHelper.notNull;
071
072
073 /**
074 * Implements the Multicast pattern to send a message exchange to a number of
075 * endpoints, each endpoint receiving a copy of the message exchange.
076 *
077 * @version
078 * @see Pipeline
079 */
080 public class MulticastProcessor extends ServiceSupport implements AsyncProcessor, Navigate<Processor>, Traceable {
081
082 private static final transient Logger LOG = LoggerFactory.getLogger(MulticastProcessor.class);
083
084 /**
085 * Class that represent each step in the multicast route to do
086 */
087 static final class DefaultProcessorExchangePair implements ProcessorExchangePair {
088 private final int index;
089 private final Processor processor;
090 private final Processor prepared;
091 private final Exchange exchange;
092
093 private DefaultProcessorExchangePair(int index, Processor processor, Processor prepared, Exchange exchange) {
094 this.index = index;
095 this.processor = processor;
096 this.prepared = prepared;
097 this.exchange = exchange;
098 }
099
100 public int getIndex() {
101 return index;
102 }
103
104 public Exchange getExchange() {
105 return exchange;
106 }
107
108 public Producer getProducer() {
109 if (processor instanceof Producer) {
110 return (Producer) processor;
111 }
112 return null;
113 }
114
115 public Processor getProcessor() {
116 return prepared;
117 }
118
119 public void begin() {
120 // noop
121 }
122
123 public void done() {
124 // noop
125 }
126
127 }
128
    /**
     * Class that represents prepared fine grained error handlers when processing multicasted/splitted exchanges
     * <p/>
     * The key is the route context and the value the output processor; the pair is used as a
     * cache key in the <tt>errorHandlers</tt> map so a wrapped error handler is only built once
     * per (route context, processor) combination.
     * <p/>
     * See the <tt>createProcessorExchangePair</tt> and <tt>createErrorHandler</tt> methods.
     */
    static final class PreparedErrorHandler extends KeyValueHolder<RouteContext, Processor> {

        public PreparedErrorHandler(RouteContext key, Processor value) {
            super(key, value);
        }

    }
141
    // optional processor invoked on each exchange copy before it is sent (onPrepare option)
    protected final Processor onPrepare;
    private final CamelContext camelContext;
    // the output processors each receiving a copy of the exchange
    private Collection<Processor> processors;
    // optional strategy to combine the replies; when null the replies are not aggregated
    private final AggregationStrategy aggregationStrategy;
    private final boolean parallelProcessing;
    // whether to aggregate replies in completion order (true) or submission order (false)
    private final boolean streaming;
    private final boolean stopOnException;
    // thread pool used when running in parallel mode
    private final ExecutorService executorService;
    // whether this processor owns (and must shut down) the executor service
    private final boolean shutdownExecutorService;
    // separate pool running the on-the-fly aggregation task during parallel processing
    private ExecutorService aggregateExecutorService;
    // max time in millis to wait for parallel replies; only enforced when > 0
    private final long timeout;
    // cache of prepared fine grained error handlers, keyed per route context + processor
    private final ConcurrentMap<PreparedErrorHandler, Processor> errorHandlers = new ConcurrentHashMap<PreparedErrorHandler, Processor>();
    private final boolean shareUnitOfWork;
155
    /**
     * Creates a multicast sending a copy of the exchange to each processor, with no aggregation.
     */
    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors) {
        this(camelContext, processors, null);
    }
159
    /**
     * Creates a sequential (non-parallel) multicast using the given aggregation strategy
     * and default settings for all other options.
     */
    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy) {
        this(camelContext, processors, aggregationStrategy, false, null, false, false, false, 0, null, false);
    }
163
    /**
     * Creates the multicast processor.
     *
     * @param camelContext            the camel context, must not be <tt>null</tt>
     * @param processors              the output processors each receiving a copy of the exchange
     * @param aggregationStrategy     optional strategy to aggregate the replies, may be <tt>null</tt>
     * @param parallelProcessing      whether to process the copies in parallel
     * @param executorService         thread pool for parallel processing; providing one forces parallel mode
     * @param shutdownExecutorService whether this processor must shut down the thread pool when stopped
     * @param streaming               whether to aggregate replies in completion order instead of submission order
     * @param stopOnException         whether to stop multicasting when one of the copies failed
     * @param timeout                 max time in millis to wait for parallel replies; only enforced when &gt; 0
     * @param onPrepare               optional processor invoked on each copy before it is sent
     * @param shareUnitOfWork         whether the copies share the unit of work of the original exchange
     */
    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy,
                              boolean parallelProcessing, ExecutorService executorService, boolean shutdownExecutorService,
                              boolean streaming, boolean stopOnException, long timeout, Processor onPrepare, boolean shareUnitOfWork) {
        notNull(camelContext, "camelContext");
        this.camelContext = camelContext;
        this.processors = processors;
        this.aggregationStrategy = aggregationStrategy;
        this.executorService = executorService;
        this.shutdownExecutorService = shutdownExecutorService;
        this.streaming = streaming;
        this.stopOnException = stopOnException;
        // must enable parallel if executor service is provided
        this.parallelProcessing = parallelProcessing || executorService != null;
        this.timeout = timeout;
        this.onPrepare = onPrepare;
        this.shareUnitOfWork = shareUnitOfWork;
    }
181
182 @Override
183 public String toString() {
184 return "Multicast[" + getProcessors() + "]";
185 }
186
    /**
     * Returns the label shown for this node when tracing.
     */
    public String getTraceLabel() {
        return "multicast";
    }
190
    /**
     * Returns the camel context this processor was created with.
     */
    public CamelContext getCamelContext() {
        return camelContext;
    }
194
    /**
     * Synchronous entry point; delegates to the async variant and waits for completion.
     */
    public void process(Exchange exchange) throws Exception {
        AsyncProcessorHelper.process(this, exchange);
    }
198
199 public boolean process(Exchange exchange, AsyncCallback callback) {
200 final AtomicExchange result = new AtomicExchange();
201 final Iterable<ProcessorExchangePair> pairs;
202
203 // multicast uses fine grained error handling on the output processors
204 // so use try .. catch to cater for this
205 boolean exhaust = false;
206 try {
207 boolean sync = true;
208
209 pairs = createProcessorExchangePairs(exchange);
210
211 // after we have created the processors we consider the exchange as exhausted if an unhandled
212 // exception was thrown, (used in the catch block)
213 // if the processors is working in Streaming model, the exchange could not be processed at this point.
214 exhaust = !isStreaming();
215
216 if (isParallelProcessing()) {
217 // ensure an executor is set when running in parallel
218 ObjectHelper.notNull(executorService, "executorService", this);
219 doProcessParallel(exchange, result, pairs, isStreaming(), callback);
220 } else {
221 sync = doProcessSequential(exchange, result, pairs, callback);
222 }
223
224 if (!sync) {
225 // the remainder of the multicast will be completed async
226 // so we break out now, then the callback will be invoked which then continue routing from where we left here
227 return false;
228 }
229 } catch (Throwable e) {
230 exchange.setException(e);
231 // and do the done work
232 doDone(exchange, null, callback, true, exhaust);
233 return true;
234 }
235
236 // multicasting was processed successfully
237 // and do the done work
238 Exchange subExchange = result.get() != null ? result.get() : null;
239 doDone(exchange, subExchange, callback, true, exhaust);
240 return true;
241 }
242
    /**
     * Processes all the pairs in parallel on the executor service, while a separate
     * on-the-fly aggregation task aggregates the replies as they complete.
     * <p/>
     * The caller thread blocks on a latch until the aggregation task has finished, so
     * the parallel branch always completes synchronously from the caller's point of view.
     *
     * @param original  the original exchange
     * @param result    holder for the (aggregated) result exchange
     * @param pairs     the processor/exchange pairs to process
     * @param streaming if <tt>true</tt> aggregate in completion order, otherwise in submission order
     * @param callback  the async callback (not invoked here; completion is signalled via the latch)
     * @throws Exception if submitting tasks failed or the aggregation task recorded an exception
     */
    protected void doProcessParallel(final Exchange original, final AtomicExchange result, final Iterable<ProcessorExchangePair> pairs,
                                     final boolean streaming, final AsyncCallback callback) throws Exception {

        ObjectHelper.notNull(executorService, "ExecutorService", this);
        ObjectHelper.notNull(aggregateExecutorService, "AggregateExecutorService", this);

        final CompletionService<Exchange> completion;
        if (streaming) {
            // execute tasks in parallel+streaming and aggregate in the order they are finished (out of order sequence)
            completion = new ExecutorCompletionService<Exchange>(executorService);
        } else {
            // execute tasks in parallel and aggregate in the order the tasks are submitted (in order sequence)
            completion = new SubmitOrderedCompletionService<Exchange>(executorService);
        }

        final AtomicInteger total = new AtomicInteger(0);
        final Iterator<ProcessorExchangePair> it = pairs.iterator();

        if (it.hasNext()) {
            // when parallel then aggregate on the fly
            final AtomicBoolean running = new AtomicBoolean(true);
            final AtomicBoolean allTasksSubmitted = new AtomicBoolean();
            final CountDownLatch aggregationOnTheFlyDone = new CountDownLatch(1);
            final AtomicException executionException = new AtomicException();

            // issue task to execute in separate thread so it can aggregate on-the-fly
            // while we submit new tasks, and those tasks complete concurrently
            // this allows us to optimize work and reduce memory consumption
            final AggregateOnTheFlyTask aggregateOnTheFlyTask = new AggregateOnTheFlyTask(result, original, total, completion, running,
                    aggregationOnTheFlyDone, allTasksSubmitted, executionException);
            final AtomicBoolean aggregationTaskSubmitted = new AtomicBoolean();

            LOG.trace("Starting to submit parallel tasks");

            while (it.hasNext()) {
                final ProcessorExchangePair pair = it.next();
                final Exchange subExchange = pair.getExchange();
                updateNewExchange(subExchange, total.intValue(), pairs, it);

                completion.submit(new Callable<Exchange>() {
                    public Exchange call() throws Exception {
                        // only start the aggregation task when the first task is being executed, to avoid
                        // starting the aggregation task too early and piling up too many threads
                        if (aggregationTaskSubmitted.compareAndSet(false, true)) {
                            // but only submit the task once
                            aggregateExecutorService.submit(aggregateOnTheFlyTask);
                        }

                        if (!running.get()) {
                            // do not start processing the task if we are not running
                            // (eg stopOnException or timeout already cancelled the multicast)
                            return subExchange;
                        }

                        try {
                            doProcessParallel(pair);
                        } catch (Throwable e) {
                            subExchange.setException(e);
                        }

                        // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                        Integer number = getExchangeIndex(subExchange);
                        boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, LOG);
                        if (stopOnException && !continueProcessing) {
                            // signal to stop running
                            running.set(false);
                            // throw caused exception
                            if (subExchange.getException() != null) {
                                // wrap in exception to explain where it failed
                                throw new CamelExchangeException("Parallel processing failed for number " + number, subExchange, subExchange.getException());
                            }
                        }

                        LOG.trace("Parallel processing complete for exchange: {}", subExchange);
                        return subExchange;
                    }
                });

                total.incrementAndGet();
            }

            // signal all tasks has been submitted
            LOG.trace("Signaling that all {} tasks has been submitted.", total.get());
            allTasksSubmitted.set(true);

            // it is too hard to do parallel async routing, so we let the caller thread run synchronously
            // and have it pick up the replies and do the aggregation (eg we use a latch to wait)
            // wait for aggregation to be done
            LOG.debug("Waiting for on-the-fly aggregation to complete aggregating {} responses for exchangeId: {}", total.get(), original.getExchangeId());
            aggregationOnTheFlyDone.await();

            // did we fail for whatever reason, if so throw that caused exception
            if (executionException.get() != null) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Parallel processing failed due {}", executionException.get().getMessage());
                }
                throw executionException.get();
            }
        }

        // now everything is okay so we are done
        LOG.debug("Done parallel processing {} exchanges", total);
    }
345
    /**
     * Task to aggregate on-the-fly for completed tasks when using parallel processing.
     * <p/>
     * This ensures lower memory consumption as we do not need to keep all completed tasks in memory
     * before we perform aggregation. Instead this separate thread will run and aggregate when new
     * completed tasks is done.
     * <p/>
     * The logic is fairly complex as this implementation has to keep track how far it got, and also
     * signal back to the <i>main</i> thread when its done, so the <i>main</i> thread can continue
     * processing when the entire splitting is done.
     */
    private final class AggregateOnTheFlyTask implements Runnable {

        // holder for the aggregated result exchange
        private final AtomicExchange result;
        private final Exchange original;
        // number of tasks submitted so far (may still grow while we aggregate)
        private final AtomicInteger total;
        private final CompletionService<Exchange> completion;
        // set to false to signal the submitted tasks to stop (timeout / stopOnException)
        private final AtomicBoolean running;
        // latch opened when aggregation is done, releasing the waiting main thread
        private final CountDownLatch aggregationOnTheFlyDone;
        private final AtomicBoolean allTasksSubmitted;
        // records any exception so the main thread can rethrow it
        private final AtomicException executionException;

        private AggregateOnTheFlyTask(AtomicExchange result, Exchange original, AtomicInteger total,
                                      CompletionService<Exchange> completion, AtomicBoolean running,
                                      CountDownLatch aggregationOnTheFlyDone, AtomicBoolean allTasksSubmitted,
                                      AtomicException executionException) {
            this.result = result;
            this.original = original;
            this.total = total;
            this.completion = completion;
            this.running = running;
            this.aggregationOnTheFlyDone = aggregationOnTheFlyDone;
            this.allTasksSubmitted = allTasksSubmitted;
            this.executionException = executionException;
        }

        public void run() {
            LOG.trace("Aggregate on the fly task started for exchangeId: {}", original.getExchangeId());

            try {
                aggregateOnTheFly();
            } catch (Throwable e) {
                // record the failure so the waiting main thread can rethrow it
                if (e instanceof Exception) {
                    executionException.set((Exception) e);
                } else {
                    executionException.set(ObjectHelper.wrapRuntimeCamelException(e));
                }
            } finally {
                // must signal we are done so the latch can open and let the other thread continue processing
                LOG.debug("Signaling we are done aggregating on the fly for exchangeId: {}", original.getExchangeId());
                LOG.trace("Aggregate on the fly task done for exchangeId: {}", original.getExchangeId());
                aggregationOnTheFlyDone.countDown();
            }
        }

        // Polls completed tasks from the completion service and aggregates them one by one
        // until all submitted tasks are aggregated, a timeout occurs, or stopOnException triggers.
        private void aggregateOnTheFly() throws InterruptedException, ExecutionException {
            boolean timedOut = false;
            boolean stoppedOnException = false;
            final StopWatch watch = new StopWatch();
            int aggregated = 0;
            boolean done = false;
            // not a for loop as on the fly may still run
            while (!done) {
                // check if we have already aggregate everything
                if (allTasksSubmitted.get() && aggregated >= total.get()) {
                    LOG.debug("Done aggregating {} exchanges on the fly.", aggregated);
                    break;
                }

                Future<Exchange> future;
                if (timedOut) {
                    // we are timed out but try to grab if some tasks has been completed
                    // poll will return null if no tasks is present
                    future = completion.poll();
                    LOG.trace("Polled completion task #{} after timeout to grab already completed tasks: {}", aggregated, future);
                } else if (timeout > 0) {
                    // poll with the remaining time budget (total timeout minus time already spent)
                    long left = timeout - watch.taken();
                    if (left < 0) {
                        left = 0;
                    }
                    LOG.trace("Polling completion task #{} using timeout {} millis.", aggregated, left);
                    future = completion.poll(left, TimeUnit.MILLISECONDS);
                } else {
                    LOG.trace("Polling completion task #{}", aggregated);
                    // we must not block so poll every second
                    future = completion.poll(1, TimeUnit.SECONDS);
                    if (future == null) {
                        // and continue loop which will recheck if we are done
                        continue;
                    }
                }

                if (future == null && timedOut) {
                    // we are timed out and no more tasks complete so break out
                    break;
                } else if (future == null) {
                    // timeout occurred
                    AggregationStrategy strategy = getAggregationStrategy(null);
                    if (strategy instanceof TimeoutAwareAggregationStrategy) {
                        // notify the strategy we timed out
                        Exchange oldExchange = result.get();
                        if (oldExchange == null) {
                            // if they all timed out the result may not have been set yet, so use the original exchange
                            oldExchange = original;
                        }
                        ((TimeoutAwareAggregationStrategy) strategy).timeout(oldExchange, aggregated, total.intValue(), timeout);
                    } else {
                        // log a WARN we timed out since it will not be aggregated and the Exchange will be lost
                        LOG.warn("Parallel processing timed out after {} millis for number {}. This task will be cancelled and will not be aggregated.", timeout, aggregated);
                    }
                    LOG.debug("Timeout occurred after {} millis for number {} task.", timeout, aggregated);
                    timedOut = true;

                    // mark that index as timed out, which allows us to try to retrieve
                    // any already completed tasks in the next loop
                    if (completion instanceof SubmitOrderedCompletionService) {
                        ((SubmitOrderedCompletionService<?>) completion).timeoutTask();
                    }
                } else {
                    // there is a result to aggregate
                    Exchange subExchange = future.get();

                    // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                    Integer number = getExchangeIndex(subExchange);
                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, LOG);
                    if (stopOnException && !continueProcessing) {
                        // we want to stop on exception and an exception or failure occurred
                        // this is similar to what the pipeline does, so we should do the same to not surprise end users
                        // so we should set the failed exchange as the result and break out
                        result.set(subExchange);
                        stoppedOnException = true;
                        break;
                    }

                    // we got a result so aggregate it
                    AggregationStrategy strategy = getAggregationStrategy(subExchange);
                    doAggregate(strategy, result, subExchange);
                }

                aggregated++;
            }

            if (timedOut || stoppedOnException) {
                if (timedOut) {
                    LOG.debug("Cancelling tasks due timeout after {} millis.", timeout);
                }
                if (stoppedOnException) {
                    LOG.debug("Cancelling tasks due stopOnException.");
                }
                // cancel tasks as we timed out (its safe to cancel done tasks)
                running.set(false);
            }
        }
    }
500
    /**
     * Processes the pairs one by one in the caller thread, aggregating each reply as it completes.
     *
     * @param original the original exchange
     * @param result   holder for the (aggregated) result exchange
     * @param pairs    the processor/exchange pairs to process
     * @param callback the callback to invoke when the multicast is done (used by the async continuation)
     * @return <tt>true</tt> if all pairs were processed synchronously, <tt>false</tt> if processing
     *         continues asynchronously and the callback takes over
     * @throws Exception if a pair failed and stopOnException is enabled with an unhandled exception
     */
    protected boolean doProcessSequential(Exchange original, AtomicExchange result, Iterable<ProcessorExchangePair> pairs, AsyncCallback callback) throws Exception {
        AtomicInteger total = new AtomicInteger();
        Iterator<ProcessorExchangePair> it = pairs.iterator();

        while (it.hasNext()) {
            ProcessorExchangePair pair = it.next();
            Exchange subExchange = pair.getExchange();
            updateNewExchange(subExchange, total.get(), pairs, it);

            boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);
            if (!sync) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", pair.getExchange().getExchangeId());
                }
                // the remainder of the multicast will be completed async
                // so we break out now, then the callback will be invoked which then continue routing from where we left here
                return false;
            }

            if (LOG.isTraceEnabled()) {
                LOG.trace("Processing exchangeId: {} is continued being processed synchronously", pair.getExchange().getExchangeId());
            }

            // Decide whether to continue with the multicast or not; similar logic to the Pipeline
            // remember to test for stop on exception and aggregate before copying back results
            boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
            if (stopOnException && !continueProcessing) {
                if (subExchange.getException() != null) {
                    // wrap in exception to explain where it failed
                    throw new CamelExchangeException("Sequential processing failed for number " + total.get(), subExchange, subExchange.getException());
                } else {
                    // we want to stop on exception, and the exception was handled by the error handler
                    // this is similar to what the pipeline does, so we should do the same to not surprise end users
                    // so we should set the failed exchange as the result and be done
                    result.set(subExchange);
                    return true;
                }
            }

            LOG.trace("Sequential processing complete for number {} exchange: {}", total, subExchange);

            doAggregate(getAggregationStrategy(subExchange), result, subExchange);
            total.incrementAndGet();
        }

        LOG.debug("Done sequential processing {} exchanges", total);

        return true;
    }
550
    /**
     * Processes a single pair; when that pair completes asynchronously, the supplied
     * callback continues processing the remaining pairs from the async thread
     * (stop-on-exception check, aggregation, then the next pair) and finally invokes
     * {@link #doDone} with <tt>doneSync=false</tt>.
     *
     * @param original the original exchange
     * @param result   holder for the (aggregated) result exchange
     * @param pairs    all the pairs (needed when continuing asynchronously)
     * @param it       iterator positioned at the remaining pairs
     * @param pair     the pair to process now
     * @param callback the callback to invoke when the whole multicast is done
     * @param total    counter of how many pairs have been processed so far
     * @return <tt>true</tt> if the pair was processed synchronously, <tt>false</tt> if processing
     *         continues asynchronously
     */
    private boolean doProcessSequential(final Exchange original, final AtomicExchange result,
                                        final Iterable<ProcessorExchangePair> pairs, final Iterator<ProcessorExchangePair> it,
                                        final ProcessorExchangePair pair, final AsyncCallback callback, final AtomicInteger total) {
        boolean sync = true;

        final Exchange exchange = pair.getExchange();
        Processor processor = pair.getProcessor();
        final Producer producer = pair.getProducer();

        TracedRouteNodes traced = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getTracedRouteNodes() : null;

        // compute time taken if sending to another endpoint
        final StopWatch watch = producer != null ? new StopWatch() : null;

        try {
            // prepare tracing starting from a new block
            if (traced != null) {
                traced.pushBlock();
            }

            if (producer != null) {
                EventHelper.notifyExchangeSending(exchange.getContext(), exchange, producer.getEndpoint());
            }
            // let the prepared process it, remember to begin the exchange pair
            AsyncProcessor async = AsyncProcessorConverterHelper.convert(processor);
            pair.begin();
            sync = AsyncProcessorHelper.process(async, exchange, new AsyncCallback() {
                public void done(boolean doneSync) {
                    // we are done with the exchange pair
                    pair.done();

                    // okay we are done, so notify the exchange was sent
                    if (producer != null) {
                        long timeTaken = watch.stop();
                        Endpoint endpoint = producer.getEndpoint();
                        // emit event that the exchange was sent to the endpoint
                        EventHelper.notifyExchangeSent(exchange.getContext(), exchange, endpoint, timeTaken);
                    }

                    // we only have to handle async completion of the routing slip
                    if (doneSync) {
                        return;
                    }

                    // continue processing the multicast asynchronously
                    Exchange subExchange = exchange;

                    // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                    // remember to test for stop on exception and aggregate before copying back results
                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
                    if (stopOnException && !continueProcessing) {
                        if (subExchange.getException() != null) {
                            // wrap in exception to explain where it failed
                            subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, subExchange.getException()));
                        } else {
                            // we want to stop on exception, and the exception was handled by the error handler
                            // this is similar to what the pipeline does, so we should do the same to not surprise end users
                            // so we should set the failed exchange as the result and be done
                            result.set(subExchange);
                        }
                        // and do the done work
                        doDone(original, subExchange, callback, false, true);
                        return;
                    }

                    try {
                        doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                    } catch (Throwable e) {
                        // wrap in exception to explain where it failed
                        subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, e));
                        // and do the done work
                        doDone(original, subExchange, callback, false, true);
                        return;
                    }

                    total.incrementAndGet();

                    // maybe there are more processors to multicast
                    while (it.hasNext()) {

                        // prepare and run the next
                        ProcessorExchangePair pair = it.next();
                        subExchange = pair.getExchange();
                        updateNewExchange(subExchange, total.get(), pairs, it);
                        boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);

                        if (!sync) {
                            LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", original.getExchangeId());
                            return;
                        }

                        // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                        // remember to test for stop on exception and aggregate before copying back results
                        continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
                        if (stopOnException && !continueProcessing) {
                            if (subExchange.getException() != null) {
                                // wrap in exception to explain where it failed
                                subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, subExchange.getException()));
                            } else {
                                // we want to stop on exception, and the exception was handled by the error handler
                                // this is similar to what the pipeline does, so we should do the same to not surprise end users
                                // so we should set the failed exchange as the result and be done
                                result.set(subExchange);
                            }
                            // and do the done work
                            doDone(original, subExchange, callback, false, true);
                            return;
                        }

                        // must catch any exceptions from aggregation
                        try {
                            doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                        } catch (Throwable e) {
                            // wrap in exception to explain where it failed
                            subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, e));
                            // and do the done work
                            doDone(original, subExchange, callback, false, true);
                            return;
                        }

                        total.incrementAndGet();
                    }

                    // do the done work
                    subExchange = result.get() != null ? result.get() : null;
                    doDone(original, subExchange, callback, false, true);
                }
            });
        } finally {
            // pop the block so by next round we have the same starting point and thus the tracing looks accurate
            if (traced != null) {
                traced.popBlock();
            }
        }

        return sync;
    }
688
    /**
     * Processes a single pair synchronously; used by the parallel tasks, which invoke
     * the processor synchronously because parallel async routing is not supported.
     *
     * @param pair the pair to process
     * @throws Exception if the processor threw while processing the exchange
     */
    private void doProcessParallel(final ProcessorExchangePair pair) throws Exception {
        final Exchange exchange = pair.getExchange();
        Processor processor = pair.getProcessor();
        Producer producer = pair.getProducer();

        TracedRouteNodes traced = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getTracedRouteNodes() : null;

        // compute time taken if sending to another endpoint
        StopWatch watch = null;
        if (producer != null) {
            watch = new StopWatch();
        }

        try {
            // prepare tracing starting from a new block
            if (traced != null) {
                traced.pushBlock();
            }

            if (producer != null) {
                EventHelper.notifyExchangeSending(exchange.getContext(), exchange, producer.getEndpoint());
            }
            // let the prepared process it, remember to begin the exchange pair
            AsyncProcessor async = AsyncProcessorConverterHelper.convert(processor);
            pair.begin();
            // we invoke it synchronously as parallel async routing is too hard
            AsyncProcessorHelper.process(async, exchange);
        } finally {
            // always mark the pair as done, even when the processor threw
            pair.done();
            // pop the block so by next round we have the same starting point and thus the tracing looks accurate
            if (traced != null) {
                traced.popBlock();
            }
            if (producer != null) {
                long timeTaken = watch.stop();
                Endpoint endpoint = producer.getEndpoint();
                // emit event that the exchange was sent to the endpoint
                // this is okay to do here in the finally block, as the processing is not using the async routing engine
                //( we invoke it synchronously as parallel async routing is too hard)
                EventHelper.notifyExchangeSent(exchange.getContext(), exchange, endpoint, timeTaken);
            }
        }
    }
732
733 /**
734 * Common work which must be done when we are done multicasting.
735 * <p/>
736 * This logic applies for both running synchronous and asynchronous as there are multiple exist points
737 * when using the asynchronous routing engine. And therefore we want the logic in one method instead
738 * of being scattered.
739 *
740 * @param original the original exchange
741 * @param subExchange the current sub exchange, can be <tt>null</tt> for the synchronous part
742 * @param callback the callback
743 * @param doneSync the <tt>doneSync</tt> parameter to call on callback
744 * @param exhaust whether or not error handling is exhausted
745 */
746 protected void doDone(Exchange original, Exchange subExchange, AsyncCallback callback, boolean doneSync, boolean exhaust) {
747 // cleanup any per exchange aggregation strategy
748 removeAggregationStrategyFromExchange(original);
749 if (original.getException() != null || subExchange != null && subExchange.getException() != null) {
750 // multicast uses error handling on its output processors and they have tried to redeliver
751 // so we shall signal back to the other error handlers that we are exhausted and they should not
752 // also try to redeliver as we will then do that twice
753 original.setProperty(Exchange.REDELIVERY_EXHAUSTED, exhaust);
754 }
755 if (subExchange != null) {
756 // and copy the current result to original so it will contain this result of this eip
757 ExchangeHelper.copyResults(original, subExchange);
758 }
759 callback.done(doneSync);
760 }
761
762 /**
763 * Aggregate the {@link Exchange} with the current result
764 *
765 * @param strategy the aggregation strategy to use
766 * @param result the current result
767 * @param exchange the exchange to be added to the result
768 */
769 protected synchronized void doAggregate(AggregationStrategy strategy, AtomicExchange result, Exchange exchange) {
770 if (strategy != null) {
771 // prepare the exchanges for aggregation
772 Exchange oldExchange = result.get();
773 ExchangeHelper.prepareAggregation(oldExchange, exchange);
774 result.set(strategy.aggregate(oldExchange, exchange));
775 }
776 }
777
778 protected void updateNewExchange(Exchange exchange, int index, Iterable<ProcessorExchangePair> allPairs,
779 Iterator<ProcessorExchangePair> it) {
780 exchange.setProperty(Exchange.MULTICAST_INDEX, index);
781 if (it.hasNext()) {
782 exchange.setProperty(Exchange.MULTICAST_COMPLETE, Boolean.FALSE);
783 } else {
784 exchange.setProperty(Exchange.MULTICAST_COMPLETE, Boolean.TRUE);
785 }
786 }
787
/**
 * Returns the multicast index previously stored on the exchange as the
 * {@link Exchange#MULTICAST_INDEX} property, or <tt>null</tt> if not set.
 *
 * @param exchange the exchange
 * @return the index, or <tt>null</tt> if the exchange has not been indexed yet
 */
protected Integer getExchangeIndex(Exchange exchange) {
    return exchange.getProperty(Exchange.MULTICAST_INDEX, Integer.class);
}
791
/**
 * Creates one {@link ProcessorExchangePair} per configured processor, each holding a
 * correlated copy of the given exchange.
 *
 * @param exchange the original exchange to copy for each processor
 * @return the prepared pairs, in processor order
 * @throws Exception if an exception was set on the exchange while preparing the pairs
 */
protected Iterable<ProcessorExchangePair> createProcessorExchangePairs(Exchange exchange) throws Exception {
    List<ProcessorExchangePair> result = new ArrayList<ProcessorExchangePair>(processors.size());

    int index = 0;
    for (Processor processor : processors) {
        // copy exchange, and do not share the unit of work (each copy gets its own)
        Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);

        // if we share unit of work, we need to prepare the child exchange
        // so it can access the parent unit of work
        if (isShareUnitOfWork()) {
            prepareSharedUnitOfWork(copy, exchange);
        }

        // and add the pair
        RouteContext routeContext = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getRouteContext() : null;
        result.add(createProcessorExchangePair(index++, processor, copy, routeContext));
    }

    if (exchange.getException() != null) {
        // force any exceptions occurred during creation of exchange pairs to be thrown
        // before returning the answer;
        throw exchange.getException();
    }

    return result;
}
818
819 /**
820 * Creates the {@link ProcessorExchangePair} which holds the processor and exchange to be send out.
821 * <p/>
822 * You <b>must</b> use this method to create the instances of {@link ProcessorExchangePair} as they
823 * need to be specially prepared before use.
824 *
825 * @param index the index
826 * @param processor the processor
827 * @param exchange the exchange
828 * @param routeContext the route context
829 * @return prepared for use
830 */
831 protected ProcessorExchangePair createProcessorExchangePair(int index, Processor processor, Exchange exchange,
832 RouteContext routeContext) {
833 Processor prepared = processor;
834
835 // set property which endpoint we send to
836 setToEndpoint(exchange, prepared);
837
838 // rework error handling to support fine grained error handling
839 prepared = createErrorHandler(routeContext, exchange, prepared);
840
841 // invoke on prepare on the exchange if specified
842 if (onPrepare != null) {
843 try {
844 onPrepare.process(exchange);
845 } catch (Exception e) {
846 exchange.setException(e);
847 }
848 }
849 return new DefaultProcessorExchangePair(index, processor, prepared, exchange);
850 }
851
852 protected Processor createErrorHandler(RouteContext routeContext, Exchange exchange, Processor processor) {
853 Processor answer;
854
855 if (routeContext != null) {
856 // wrap the producer in error handler so we have fine grained error handling on
857 // the output side instead of the input side
858 // this is needed to support redelivery on that output alone and not doing redelivery
859 // for the entire multicast block again which will start from scratch again
860
861 // create key for cache
862 final PreparedErrorHandler key = new PreparedErrorHandler(routeContext, processor);
863
864 // lookup cached first to reuse and preserve memory
865 answer = errorHandlers.get(key);
866 if (answer != null) {
867 LOG.trace("Using existing error handler for: {}", processor);
868 return answer;
869 }
870
871 LOG.trace("Creating error handler for: {}", processor);
872 ErrorHandlerFactory builder = routeContext.getRoute().getErrorHandlerBuilder();
873 // create error handler (create error handler directly to keep it light weight,
874 // instead of using ProcessorDefinition.wrapInErrorHandler)
875 try {
876 processor = builder.createErrorHandler(routeContext, processor);
877
878 // and wrap in unit of work processor so the copy exchange also can run under UoW
879 answer = createUnitOfWorkProcessor(routeContext, processor, exchange);
880
881 // must start the error handler
882 ServiceHelper.startServices(answer);
883 } catch (Exception e) {
884 throw ObjectHelper.wrapRuntimeCamelException(e);
885 }
886
887 // add to cache
888 errorHandlers.putIfAbsent(key, answer);
889 } else {
890 // and wrap in unit of work processor so the copy exchange also can run under UoW
891 answer = createUnitOfWorkProcessor(routeContext, processor, exchange);
892 }
893
894 return answer;
895 }
896
897 /**
898 * Strategy to create the {@link UnitOfWorkProcessor} to be used for the sub route
899 *
900 * @param routeContext the route context
901 * @param processor the processor wrapped in this unit of work processor
902 * @param exchange the exchange
903 * @return the unit of work processor
904 */
905 protected UnitOfWorkProcessor createUnitOfWorkProcessor(RouteContext routeContext, Processor processor, Exchange exchange) {
906 UnitOfWork parent = exchange.getProperty(Exchange.PARENT_UNIT_OF_WORK, UnitOfWork.class);
907 if (parent != null) {
908 return new ChildUnitOfWorkProcessor(parent, routeContext, processor);
909 } else {
910 return new UnitOfWorkProcessor(routeContext, processor);
911 }
912 }
913
914 /**
915 * Prepares the exchange for participating in a shared unit of work
916 * <p/>
917 * This ensures a child exchange can access its parent {@link UnitOfWork} when it participate
918 * in a shared unit of work.
919 *
920 * @param childExchange the child exchange
921 * @param parentExchange the parent exchange
922 */
923 protected void prepareSharedUnitOfWork(Exchange childExchange, Exchange parentExchange) {
924 childExchange.setProperty(Exchange.PARENT_UNIT_OF_WORK, parentExchange.getUnitOfWork());
925 }
926
/**
 * Validates the configuration and starts the child processors.
 *
 * @throws IllegalArgumentException if parallel processing is enabled without an executor service,
 *                                  or a timeout is used without parallel processing
 * @throws Exception if starting the child processors fails
 */
protected void doStart() throws Exception {
    // parallel processing requires a thread pool to submit tasks to
    if (isParallelProcessing() && executorService == null) {
        throw new IllegalArgumentException("ParallelProcessing is enabled but ExecutorService has not been set");
    }
    // the timeout is only honored by the parallel aggregation task
    if (timeout > 0 && !isParallelProcessing()) {
        throw new IllegalArgumentException("Timeout is used but ParallelProcessing has not been enabled");
    }
    if (isParallelProcessing() && aggregateExecutorService == null) {
        // use unbounded thread pool so we ensure the aggregate on-the-fly task always will have assigned a thread
        // and run the tasks when the task is submitted. If not then the aggregate task may not be able to run
        // and signal completion during processing, which would lead to what would appear as a dead-lock or a slow processing
        String name = getClass().getSimpleName() + "-AggregateTask";
        aggregateExecutorService = createAggregateExecutorService(name);
    }
    ServiceHelper.startServices(processors);
}
943
944 /**
945 * Strategy to create the thread pool for the aggregator background task which waits for and aggregates
946 * completed tasks when running in parallel mode.
947 *
948 * @param name the suggested name for the background thread
949 * @return the thread pool
950 */
951 protected synchronized ExecutorService createAggregateExecutorService(String name) {
952 // use a cached thread pool so we each on-the-fly task has a dedicated thread to process completions as they come in
953 return camelContext.getExecutorServiceManager().newCachedThreadPool(this, name);
954 }
955
@Override
protected void doStop() throws Exception {
    // stop the child processors and any cached error handlers;
    // the error handler cache is kept so it can be reused on restart (cleared in doShutdown)
    ServiceHelper.stopServices(processors, errorHandlers);
}
960
@Override
protected void doShutdown() throws Exception {
    ServiceHelper.stopAndShutdownServices(processors, errorHandlers);
    // only clear error handlers when shutting down
    errorHandlers.clear();

    // shut down the worker pool if this processor owns it
    if (shutdownExecutorService && executorService != null) {
        getCamelContext().getExecutorServiceManager().shutdownNow(executorService);
    }
}
971
972 protected static void setToEndpoint(Exchange exchange, Processor processor) {
973 if (processor instanceof Producer) {
974 Producer producer = (Producer) processor;
975 exchange.setProperty(Exchange.TO_ENDPOINT, producer.getEndpoint().getEndpointUri());
976 }
977 }
978
979 protected AggregationStrategy getAggregationStrategy(Exchange exchange) {
980 AggregationStrategy answer = null;
981
982 // prefer to use per Exchange aggregation strategy over a global strategy
983 if (exchange != null) {
984 Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
985 Map<Object, AggregationStrategy> map = CastUtils.cast(property);
986 if (map != null) {
987 answer = map.get(this);
988 }
989 }
990 if (answer == null) {
991 // fallback to global strategy
992 answer = getAggregationStrategy();
993 }
994 return answer;
995 }
996
997 /**
998 * Sets the given {@link org.apache.camel.processor.aggregate.AggregationStrategy} on the {@link Exchange}.
999 *
1000 * @param exchange the exchange
1001 * @param aggregationStrategy the strategy
1002 */
1003 protected void setAggregationStrategyOnExchange(Exchange exchange, AggregationStrategy aggregationStrategy) {
1004 Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
1005 Map<Object, AggregationStrategy> map = CastUtils.cast(property);
1006 if (map == null) {
1007 map = new HashMap<Object, AggregationStrategy>();
1008 }
1009 // store the strategy using this processor as the key
1010 // (so we can store multiple strategies on the same exchange)
1011 map.put(this, aggregationStrategy);
1012 exchange.setProperty(Exchange.AGGREGATION_STRATEGY, map);
1013 }
1014
1015 /**
1016 * Removes the associated {@link org.apache.camel.processor.aggregate.AggregationStrategy} from the {@link Exchange}
1017 * which must be done after use.
1018 *
1019 * @param exchange the current exchange
1020 */
1021 protected void removeAggregationStrategyFromExchange(Exchange exchange) {
1022 Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
1023 Map<Object, AggregationStrategy> map = CastUtils.cast(property);
1024 if (map == null) {
1025 return;
1026 }
1027 // remove the strategy using this processor as the key
1028 map.remove(this);
1029 }
1030
1031 /**
1032 * Is the multicast processor working in streaming mode?
1033 * <p/>
1034 * In streaming mode:
1035 * <ul>
1036 * <li>we use {@link Iterable} to ensure we can send messages as soon as the data becomes available</li>
1037 * <li>for parallel processing, we start aggregating responses as they get send back to the processor;
1038 * this means the {@link org.apache.camel.processor.aggregate.AggregationStrategy} has to take care of handling out-of-order arrival of exchanges</li>
1039 * </ul>
1040 */
1041 public boolean isStreaming() {
1042 return streaming;
1043 }
1044
1045 /**
1046 * Should the multicast processor stop processing further exchanges in case of an exception occurred?
1047 */
1048 public boolean isStopOnException() {
1049 return stopOnException;
1050 }
1051
1052 /**
1053 * Returns the producers to multicast to
1054 */
1055 public Collection<Processor> getProcessors() {
1056 return processors;
1057 }
1058
1059 /**
1060 * An optional timeout in millis when using parallel processing
1061 */
1062 public long getTimeout() {
1063 return timeout;
1064 }
1065
1066 /**
1067 * Use {@link #getAggregationStrategy(org.apache.camel.Exchange)} instead.
1068 */
1069 public AggregationStrategy getAggregationStrategy() {
1070 return aggregationStrategy;
1071 }
1072
1073 public boolean isParallelProcessing() {
1074 return parallelProcessing;
1075 }
1076
1077 public boolean isShareUnitOfWork() {
1078 return shareUnitOfWork;
1079 }
1080
1081 public List<Processor> next() {
1082 if (!hasNext()) {
1083 return null;
1084 }
1085 return new ArrayList<Processor>(processors);
1086 }
1087
/**
 * Whether there are any child processors to navigate.
 */
public boolean hasNext() {
    return processors != null && !processors.isEmpty();
}
1091 }