001/** 002 * Licensed to the Apache Software Foundation (ASF) under one or more 003 * contributor license agreements. See the NOTICE file distributed with 004 * this work for additional information regarding copyright ownership. 005 * The ASF licenses this file to You under the Apache License, Version 2.0 006 * (the "License"); you may not use this file except in compliance with 007 * the License. You may obtain a copy of the License at 008 * 009 * http://www.apache.org/licenses/LICENSE-2.0 010 * 011 * Unless required by applicable law or agreed to in writing, software 012 * distributed under the License is distributed on an "AS IS" BASIS, 013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 014 * See the License for the specific language governing permissions and 015 * limitations under the License. 016 */ 017package org.apache.camel.processor; 018 019import java.io.Closeable; 020import java.util.ArrayList; 021import java.util.Collection; 022import java.util.Iterator; 023import java.util.List; 024import java.util.Map; 025import java.util.concurrent.Callable; 026import java.util.concurrent.CompletionService; 027import java.util.concurrent.ConcurrentHashMap; 028import java.util.concurrent.ConcurrentMap; 029import java.util.concurrent.CountDownLatch; 030import java.util.concurrent.ExecutionException; 031import java.util.concurrent.ExecutorCompletionService; 032import java.util.concurrent.ExecutorService; 033import java.util.concurrent.Future; 034import java.util.concurrent.TimeUnit; 035import java.util.concurrent.atomic.AtomicBoolean; 036import java.util.concurrent.atomic.AtomicInteger; 037 038import org.apache.camel.AsyncCallback; 039import org.apache.camel.AsyncProcessor; 040import org.apache.camel.CamelContext; 041import org.apache.camel.CamelContextAware; 042import org.apache.camel.CamelExchangeException; 043import org.apache.camel.Endpoint; 044import org.apache.camel.ErrorHandlerFactory; 045import org.apache.camel.Exchange; 046import 
org.apache.camel.Navigate; 047import org.apache.camel.Processor; 048import org.apache.camel.Producer; 049import org.apache.camel.StreamCache; 050import org.apache.camel.Traceable; 051import org.apache.camel.processor.aggregate.AggregationStrategy; 052import org.apache.camel.processor.aggregate.CompletionAwareAggregationStrategy; 053import org.apache.camel.processor.aggregate.DelegateAggregationStrategy; 054import org.apache.camel.processor.aggregate.TimeoutAwareAggregationStrategy; 055import org.apache.camel.spi.IdAware; 056import org.apache.camel.spi.RouteContext; 057import org.apache.camel.spi.TracedRouteNodes; 058import org.apache.camel.spi.UnitOfWork; 059import org.apache.camel.support.ServiceSupport; 060import org.apache.camel.util.AsyncProcessorConverterHelper; 061import org.apache.camel.util.AsyncProcessorHelper; 062import org.apache.camel.util.CastUtils; 063import org.apache.camel.util.EventHelper; 064import org.apache.camel.util.ExchangeHelper; 065import org.apache.camel.util.IOHelper; 066import org.apache.camel.util.KeyValueHolder; 067import org.apache.camel.util.ObjectHelper; 068import org.apache.camel.util.ServiceHelper; 069import org.apache.camel.util.StopWatch; 070import org.apache.camel.util.concurrent.AtomicException; 071import org.apache.camel.util.concurrent.AtomicExchange; 072import org.apache.camel.util.concurrent.SubmitOrderedCompletionService; 073import org.slf4j.Logger; 074import org.slf4j.LoggerFactory; 075 076import static org.apache.camel.util.ObjectHelper.notNull; 077 078 079/** 080 * Implements the Multicast pattern to send a message exchange to a number of 081 * endpoints, each endpoint receiving a copy of the message exchange. 
 *
 * @version 
 * @see Pipeline
 */
public class MulticastProcessor extends ServiceSupport implements AsyncProcessor, Navigate<Processor>, Traceable, IdAware {

    private static final Logger LOG = LoggerFactory.getLogger(MulticastProcessor.class);

    /**
     * Class that represent each step in the multicast route to do
     * <p/>
     * Holds the (possibly error-handler wrapped) processor to invoke together with the
     * copy of the exchange it should process.
     */
    static final class DefaultProcessorExchangePair implements ProcessorExchangePair {
        private final int index;
        private final Processor processor;
        private final Processor prepared;
        private final Exchange exchange;

        private DefaultProcessorExchangePair(int index, Processor processor, Processor prepared, Exchange exchange) {
            this.index = index;
            this.processor = processor;
            this.prepared = prepared;
            this.exchange = exchange;
        }

        public int getIndex() {
            return index;
        }

        public Exchange getExchange() {
            return exchange;
        }

        public Producer getProducer() {
            // only the raw (unprepared) processor may be a producer; otherwise there is no producer
            if (processor instanceof Producer) {
                return (Producer) processor;
            }
            return null;
        }

        public Processor getProcessor() {
            // note: returns the prepared (decorated) processor, not the raw one
            return prepared;
        }

        public void begin() {
            // noop
        }

        public void done() {
            // noop
        }

    }

    /**
     * Class that represents prepared fine grained error handlers when processing multicasted/splitted exchanges
     * <p/>
     * See the <tt>createProcessorExchangePair</tt> and <tt>createErrorHandler</tt> methods.
     */
    static final class PreparedErrorHandler extends KeyValueHolder<RouteContext, Processor> {

        PreparedErrorHandler(RouteContext key, Processor value) {
            super(key, value);
        }

    }

    // processor invoked on each copy before it is sent (may be null)
    protected final Processor onPrepare;
    private final CamelContext camelContext;
    private String id;
    private Collection<Processor> processors;
    private final AggregationStrategy aggregationStrategy;
    private final boolean parallelProcessing;
    private final boolean streaming;
    private final boolean parallelAggregate;
    private final boolean stopOnAggregateException;
    private final boolean stopOnException;
    private final ExecutorService executorService;
    private final boolean shutdownExecutorService;
    // executor used for the on-the-fly aggregation task; presumably set during service start — not visible in this chunk
    private ExecutorService aggregateExecutorService;
    private final long timeout;
    // cache of prepared error handlers keyed by (route context, processor)
    private final ConcurrentMap<PreparedErrorHandler, Processor> errorHandlers = new ConcurrentHashMap<PreparedErrorHandler, Processor>();
    private final boolean shareUnitOfWork;

    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors) {
        this(camelContext, processors, null);
    }

    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy) {
        this(camelContext, processors, aggregationStrategy, false, null, false, false, false, 0, null, false, false);
    }

    @Deprecated
    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy,
                              boolean parallelProcessing, ExecutorService executorService, boolean shutdownExecutorService,
                              boolean streaming, boolean stopOnException, long timeout, Processor onPrepare, boolean shareUnitOfWork) {
        // delegates with parallelAggregate = false
        this(camelContext, processors, aggregationStrategy, parallelProcessing, executorService, shutdownExecutorService,
            streaming, stopOnException, timeout, onPrepare, shareUnitOfWork, false);
    }

    public
MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy, boolean parallelProcessing, 182 ExecutorService executorService, boolean shutdownExecutorService, boolean streaming, boolean stopOnException, long timeout, Processor onPrepare, 183 boolean shareUnitOfWork, boolean parallelAggregate) { 184 this(camelContext, processors, aggregationStrategy, parallelProcessing, executorService, shutdownExecutorService, streaming, stopOnException, timeout, onPrepare, 185 shareUnitOfWork, false, false); 186 } 187 188 public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy, 189 boolean parallelProcessing, ExecutorService executorService, boolean shutdownExecutorService, boolean streaming, 190 boolean stopOnException, long timeout, Processor onPrepare, boolean shareUnitOfWork, 191 boolean parallelAggregate, boolean stopOnAggregateException) { 192 notNull(camelContext, "camelContext"); 193 this.camelContext = camelContext; 194 this.processors = processors; 195 this.aggregationStrategy = aggregationStrategy; 196 this.executorService = executorService; 197 this.shutdownExecutorService = shutdownExecutorService; 198 this.streaming = streaming; 199 this.stopOnException = stopOnException; 200 // must enable parallel if executor service is provided 201 this.parallelProcessing = parallelProcessing || executorService != null; 202 this.timeout = timeout; 203 this.onPrepare = onPrepare; 204 this.shareUnitOfWork = shareUnitOfWork; 205 this.parallelAggregate = parallelAggregate; 206 this.stopOnAggregateException = stopOnAggregateException; 207 } 208 209 @Override 210 public String toString() { 211 return "Multicast[" + getProcessors() + "]"; 212 } 213 214 public String getId() { 215 return id; 216 } 217 218 public void setId(String id) { 219 this.id = id; 220 } 221 222 public String getTraceLabel() { 223 return "multicast"; 224 } 225 226 public CamelContext 
 getCamelContext() {
        return camelContext;
    }

    public void process(Exchange exchange) throws Exception {
        // synchronous facade over the async variant
        AsyncProcessorHelper.process(this, exchange);
    }

    /**
     * Multicasts the exchange, either in parallel or sequentially.
     *
     * @return true when processing completed synchronously; false when the remainder
     *         continues asynchronously and the callback will be invoked later
     */
    public boolean process(Exchange exchange, AsyncCallback callback) {
        // holds the aggregated reply (if any) across all sub exchanges
        final AtomicExchange result = new AtomicExchange();
        Iterable<ProcessorExchangePair> pairs = null;

        try {
            boolean sync = true;

            // NOTE(review): createProcessorExchangePairs is defined elsewhere in this file
            pairs = createProcessorExchangePairs(exchange);

            if (isParallelProcessing()) {
                // ensure an executor is set when running in parallel
                ObjectHelper.notNull(executorService, "executorService", this);
                doProcessParallel(exchange, result, pairs, isStreaming(), callback);
            } else {
                sync = doProcessSequential(exchange, result, pairs, callback);
            }

            if (!sync) {
                // the remainder of the multicast will be completed async
                // so we break out now, then the callback will be invoked which then continue routing from where we left here
                return false;
            }
        } catch (Throwable e) {
            exchange.setException(e);
            // unexpected exception was thrown, maybe from iterator etc. so do not regard as exhausted
            // and do the done work
            doDone(exchange, null, pairs, callback, true, false);
            return true;
        }

        // multicasting was processed successfully
        // and do the done work
        Exchange subExchange = result.get() != null ?
 result.get() : null;
        doDone(exchange, subExchange, pairs, callback, true, true);
        return true;
    }

    /**
     * Processes the exchange pairs in parallel using the configured executor service,
     * while a dedicated task aggregates completed replies on the fly.
     * <p/>
     * Note: this blocks the caller thread until the on-the-fly aggregation is done
     * (parallel async routing is not supported here).
     */
    protected void doProcessParallel(final Exchange original, final AtomicExchange result, final Iterable<ProcessorExchangePair> pairs,
                                     final boolean streaming, final AsyncCallback callback) throws Exception {

        ObjectHelper.notNull(executorService, "ExecutorService", this);
        ObjectHelper.notNull(aggregateExecutorService, "AggregateExecutorService", this);

        final CompletionService<Exchange> completion;
        if (streaming) {
            // execute tasks in parallel+streaming and aggregate in the order they are finished (out of order sequence)
            completion = new ExecutorCompletionService<Exchange>(executorService);
        } else {
            // execute tasks in parallel and aggregate in the order the tasks are submitted (in order sequence)
            completion = new SubmitOrderedCompletionService<Exchange>(executorService);
        }

        // number of tasks submitted so far; shared with the aggregation task
        final AtomicInteger total = new AtomicInteger(0);
        final Iterator<ProcessorExchangePair> it = pairs.iterator();

        if (it.hasNext()) {
            // when parallel then aggregate on the fly
            final AtomicBoolean running = new AtomicBoolean(true);
            final AtomicBoolean allTasksSubmitted = new AtomicBoolean();
            final CountDownLatch aggregationOnTheFlyDone = new CountDownLatch(1);
            final AtomicException executionException = new AtomicException();

            // issue task to execute in separate thread so it can aggregate on-the-fly
            // while we submit new tasks, and those tasks complete concurrently
            // this allows us to optimize work and reduce memory consumption
            final AggregateOnTheFlyTask aggregateOnTheFlyTask = new AggregateOnTheFlyTask(result, original, total, completion, running,
                    aggregationOnTheFlyDone, allTasksSubmitted, executionException);
            final AtomicBoolean aggregationTaskSubmitted = new AtomicBoolean();

            LOG.trace("Starting to submit parallel tasks");

            try {
                while (it.hasNext()) {
                    final ProcessorExchangePair pair = it.next();
                    // in case the iterator returns null then continue to next
                    if (pair == null) {
                        continue;
                    }

                    final Exchange subExchange = pair.getExchange();
                    updateNewExchange(subExchange, total.intValue(), pairs, it);

                    completion.submit(new Callable<Exchange>() {
                        public Exchange call() throws Exception {
                            // start the aggregation task at this stage only in order not to pile up too many threads
                            if (aggregationTaskSubmitted.compareAndSet(false, true)) {
                                // but only submit the aggregation task once
                                aggregateExecutorService.submit(aggregateOnTheFlyTask);
                            }

                            if (!running.get()) {
                                // do not start processing the task if we are not running
                                // (e.g. stopped due to stopOnException or timeout)
                                return subExchange;
                            }

                            try {
                                doProcessParallel(pair);
                            } catch (Throwable e) {
                                subExchange.setException(e);
                            }

                            // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                            Integer number = getExchangeIndex(subExchange);
                            boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, LOG);
                            if (stopOnException && !continueProcessing) {
                                // signal to stop running
                                running.set(false);
                                // throw caused exception
                                if (subExchange.getException() != null) {
                                    // wrap in exception to explain where it failed
                                    CamelExchangeException cause = new CamelExchangeException("Parallel processing failed for number " + number, subExchange, subExchange.getException());
                                    subExchange.setException(cause);
                                }
                            }

                            LOG.trace("Parallel processing complete for exchange: {}", subExchange);
                            return subExchange;
                        }
                    });

                    total.incrementAndGet();
                }
            } catch (Throwable e) {
                // The methods it.hasNext and it.next can throw RuntimeExceptions when custom iterators are implemented.
                // We have to catch the exception here otherwise the aggregator threads would pile up.
                if (e instanceof Exception) {
                    executionException.set((Exception) e);
                } else {
                    executionException.set(ObjectHelper.wrapRuntimeCamelException(e));
                }
            }

            // signal all tasks has been submitted
            LOG.trace("Signaling that all {} tasks has been submitted.", total.get());
            allTasksSubmitted.set(true);

            // its too hard to do parallel async routing so we let the caller thread be synchronously
            // and have it pickup the replies and do the aggregation (eg we use a latch to wait)
            // wait for aggregation to be done
            LOG.debug("Waiting for on-the-fly aggregation to complete aggregating {} responses for exchangeId: {}", total.get(), original.getExchangeId());
            aggregationOnTheFlyDone.await();

            // did we fail for whatever reason, if so throw that caused exception
            if (executionException.get() != null) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Parallel processing failed due {}", executionException.get().getMessage());
                }
                throw executionException.get();
            }
        }

        // now everything is okay so we are done
        LOG.debug("Done parallel processing {} exchanges", total);
    }

    /**
     * Boss worker to control aggregate on-the-fly for completed tasks when using parallel processing.
     * <p/>
     * This ensures lower memory consumption as we do not need to keep all completed tasks in memory
     * before we perform aggregation. Instead this separate thread will run and aggregate when new
     * completed tasks is done.
     * <p/>
     * The logic is fairly complex as this implementation has to keep track how far it got, and also
     * signal back to the <i>main</i> thread when its done, so the <i>main</i> thread can continue
     * processing when the entire splitting is done.
     */
    private final class AggregateOnTheFlyTask implements Runnable {

        private final AtomicExchange result;
        private final Exchange original;
        private final AtomicInteger total;
        private final CompletionService<Exchange> completion;
        private final AtomicBoolean running;
        private final CountDownLatch aggregationOnTheFlyDone;
        private final AtomicBoolean allTasksSubmitted;
        private final AtomicException executionException;

        private AggregateOnTheFlyTask(AtomicExchange result, Exchange original, AtomicInteger total,
                                      CompletionService<Exchange> completion, AtomicBoolean running,
                                      CountDownLatch aggregationOnTheFlyDone, AtomicBoolean allTasksSubmitted,
                                      AtomicException executionException) {
            this.result = result;
            this.original = original;
            this.total = total;
            this.completion = completion;
            this.running = running;
            this.aggregationOnTheFlyDone = aggregationOnTheFlyDone;
            this.allTasksSubmitted = allTasksSubmitted;
            this.executionException = executionException;
        }

        public void run() {
            LOG.trace("Aggregate on the fly task started for exchangeId: {}", original.getExchangeId());

            try {
                aggregateOnTheFly();
            } catch (Throwable e) {
                // propagate the failure to the main thread via the shared holder
                if (e instanceof Exception) {
                    executionException.set((Exception) e);
                } else {
                    executionException.set(ObjectHelper.wrapRuntimeCamelException(e));
                }
            } finally {
                // must signal we are done so the latch can open and let the other thread continue processing
                LOG.debug("Signaling we are done aggregating on the fly for exchangeId: {}", original.getExchangeId());
                LOG.trace("Aggregate on the fly task done for exchangeId: {}", original.getExchangeId());
                aggregationOnTheFlyDone.countDown();
            }
        }

        /**
         * Polls the completion service and aggregates replies until all submitted tasks are
         * aggregated, the multicast is stopped on exception, or the configured timeout fires.
         */
        private void aggregateOnTheFly() throws InterruptedException, ExecutionException {
            final AtomicBoolean timedOut = new AtomicBoolean();
            boolean stoppedOnException = false;
            final StopWatch watch = new StopWatch();
            final AtomicInteger aggregated = new AtomicInteger();
            boolean done = false;
            // not a for loop as on the fly may still run
            while (!done) {
                // check if we have already aggregate everything
                if (allTasksSubmitted.get() && aggregated.intValue() >= total.get()) {
                    LOG.debug("Done aggregating {} exchanges on the fly.", aggregated);
                    break;
                }

                Future<Exchange> future;
                if (timedOut.get()) {
                    // we are timed out but try to grab if some tasks has been completed
                    // poll will return null if no tasks is present
                    future = completion.poll();
                    LOG.trace("Polled completion task #{} after timeout to grab already completed tasks: {}", aggregated, future);
                } else if (timeout > 0) {
                    // poll with the remaining time of the overall timeout budget
                    long left = timeout - watch.taken();
                    if (left < 0) {
                        left = 0;
                    }
                    LOG.trace("Polling completion task #{} using timeout {} millis.", aggregated, left);
                    future = completion.poll(left, TimeUnit.MILLISECONDS);
                } else {
                    LOG.trace("Polling completion task #{}", aggregated);
                    // we must not block so poll every second
                    future = completion.poll(1, TimeUnit.SECONDS);
                    if (future == null) {
                        // and continue loop which will recheck if we are done
                        continue;
                    }
                }

                if (future == null) {
                    // only reachable when a timeout budget was configured and it elapsed
                    ParallelAggregateTimeoutTask task = new ParallelAggregateTimeoutTask(original, result, completion, aggregated, total, timedOut);
                    if (parallelAggregate) {
                        aggregateExecutorService.submit(task);
                    } else {
                        // in non parallel mode then just run the task
                        task.run();
                    }
                } else {
                    // there is a result to aggregate
                    Exchange subExchange = future.get();

                    // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                    Integer number = getExchangeIndex(subExchange);
                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, LOG);
                    if (stopOnException && !continueProcessing) {
                        // we want to stop on exception and an exception or failure occurred
                        // this is similar to what the pipeline does, so we should do the same to not surprise end users
                        // so we should set the failed exchange as the result and break out
                        result.set(subExchange);
                        stoppedOnException = true;
                        break;
                    }

                    // we got a result so aggregate it
                    ParallelAggregateTask task = new ParallelAggregateTask(result, subExchange, aggregated);
                    if (parallelAggregate) {
                        aggregateExecutorService.submit(task);
                    } else {
                        // in non parallel mode then just run the task
                        task.run();
                    }
                }
            }

            if (timedOut.get() || stoppedOnException) {
                if (timedOut.get()) {
                    LOG.debug("Cancelling tasks due timeout after {} millis.", timeout);
                }
                if (stoppedOnException) {
                    LOG.debug("Cancelling tasks due stopOnException.");
                }
                // cancel tasks as we timed out (its safe to cancel done tasks)
                running.set(false);
            }
        }
    }

    /**
     * Worker task to aggregate the old and new exchange on-the-fly for completed tasks when using parallel processing.
     */
    private final class ParallelAggregateTask implements Runnable {

        private final AtomicExchange result;
        private final Exchange subExchange;
        private final AtomicInteger aggregated;

        private ParallelAggregateTask(AtomicExchange result, Exchange subExchange, AtomicInteger aggregated) {
            this.result = result;
            this.subExchange = subExchange;
            this.aggregated = aggregated;
        }

        @Override
        public void run() {
            try {
                // doAggregateInternal is the non-synchronized variant used when aggregation runs concurrently
                if (parallelAggregate) {
                    doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
                } else {
                    doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                }
            } catch (Throwable e) {
                if (isStopOnAggregateException()) {
                    // rethrow so the failure from the aggregation strategy propagates
                    throw e;
                } else {
                    // wrap in exception to explain where it failed
                    CamelExchangeException cex = new CamelExchangeException("Parallel processing failed for number " + aggregated.get(), subExchange, e);
                    subExchange.setException(cex);
                    LOG.debug(cex.getMessage(), cex);
                }
            } finally {
                // always count this task as aggregated so the boss loop can terminate
                aggregated.incrementAndGet();
            }
        }
    }

    /**
     * Worker task to aggregate the old and new exchange on-the-fly for completed tasks when using parallel processing.
     */
    private final class ParallelAggregateTimeoutTask implements Runnable {

        private final Exchange original;
        private final AtomicExchange result;
        private final CompletionService<Exchange> completion;
        private final AtomicInteger aggregated;
        private final AtomicInteger total;
        private final AtomicBoolean timedOut;

        private ParallelAggregateTimeoutTask(Exchange original, AtomicExchange result, CompletionService<Exchange> completion,
                                             AtomicInteger aggregated, AtomicInteger total, AtomicBoolean timedOut) {
            this.original = original;
            this.result = result;
            this.completion = completion;
            this.aggregated = aggregated;
            this.total = total;
            this.timedOut = timedOut;
        }

        @Override
        public void run() {
            AggregationStrategy strategy = getAggregationStrategy(null);
            if (strategy instanceof DelegateAggregationStrategy) {
                // unwrap to check the real strategy for timeout awareness
                strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
            }
            if (strategy instanceof TimeoutAwareAggregationStrategy) {
                // notify the strategy we timed out
                Exchange oldExchange = result.get();
                if (oldExchange == null) {
                    // if they all timed out the result may not have been set yet, so use the original exchange
                    oldExchange = original;
                }
                ((TimeoutAwareAggregationStrategy) strategy).timeout(oldExchange, aggregated.intValue(), total.intValue(), timeout);
            } else {
                // log a WARN we timed out since it will not be aggregated and the Exchange will be lost
                LOG.warn("Parallel processing timed out after {} millis for number {}. This task will be cancelled and will not be aggregated.", timeout, aggregated.intValue());
            }
            LOG.debug("Timeout occurred after {} millis for number {} task.", timeout, aggregated.intValue());
            timedOut.set(true);

            // mark that index as timed out, which allows us to try to retrieve
            // any already completed tasks in the next loop
            if (completion instanceof SubmitOrderedCompletionService) {
                ((SubmitOrderedCompletionService<?>) completion).timeoutTask();
            }

            // we timed out so increment the counter
            aggregated.incrementAndGet();
        }
    }

    /**
     * Processes the exchange pairs one by one in the caller thread.
     *
     * @return true when all pairs completed synchronously; false when a pair continued
     *         asynchronously (the callback then drives the remainder)
     */
    protected boolean doProcessSequential(Exchange original, AtomicExchange result, Iterable<ProcessorExchangePair> pairs, AsyncCallback callback) throws Exception {
        AtomicInteger total = new AtomicInteger();
        Iterator<ProcessorExchangePair> it = pairs.iterator();

        while (it.hasNext()) {
            ProcessorExchangePair pair = it.next();
            // in case the iterator returns null then continue to next
            if (pair == null) {
                continue;
            }
            Exchange subExchange = pair.getExchange();
            updateNewExchange(subExchange, total.get(), pairs, it);

            boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);
            if (!sync) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", pair.getExchange().getExchangeId());
                }
                // the remainder of the multicast will be completed async
                // so we break out now, then the callback will be invoked which then continue routing from where we left here
                return false;
            }

            if (LOG.isTraceEnabled()) {
                LOG.trace("Processing exchangeId: {} is continued being processed synchronously", pair.getExchange().getExchangeId());
            }

            // Decide whether to continue with the multicast or not; similar logic to the Pipeline
            // remember to test for stop on exception and aggregate before copying back results
            boolean
 continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
            if (stopOnException && !continueProcessing) {
                if (subExchange.getException() != null) {
                    // wrap in exception to explain where it failed
                    CamelExchangeException cause = new CamelExchangeException("Sequential processing failed for number " + total.get(), subExchange, subExchange.getException());
                    subExchange.setException(cause);
                }
                // we want to stop on exception, and the exception was handled by the error handler
                // this is similar to what the pipeline does, so we should do the same to not surprise end users
                // so we should set the failed exchange as the result and be done
                result.set(subExchange);
                return true;
            }

            LOG.trace("Sequential processing complete for number {} exchange: {}", total, subExchange);

            // aggregate before moving on to the next pair
            if (parallelAggregate) {
                doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
            } else {
                doAggregate(getAggregationStrategy(subExchange), result, subExchange);
            }

            total.incrementAndGet();
        }

        LOG.debug("Done sequential processing {} exchanges", total);

        return true;
    }

    /**
     * Processes a single exchange pair; when the processor completes asynchronously the
     * installed callback continues the remaining pairs and finally invokes doDone.
     *
     * @return true when this pair was processed synchronously
     */
    private boolean doProcessSequential(final Exchange original, final AtomicExchange result,
                                        final Iterable<ProcessorExchangePair> pairs, final Iterator<ProcessorExchangePair> it,
                                        final ProcessorExchangePair pair, final AsyncCallback callback, final AtomicInteger total) {
        boolean sync = true;

        final Exchange exchange = pair.getExchange();
        Processor processor = pair.getProcessor();
        final Producer producer = pair.getProducer();

        TracedRouteNodes traced = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getTracedRouteNodes() : null;

        // compute time taken if sending to another endpoint
        final StopWatch watch = producer != null ?
 new StopWatch() : null;

        try {
            // prepare tracing starting from a new block
            if (traced != null) {
                traced.pushBlock();
            }

            if (producer != null) {
                EventHelper.notifyExchangeSending(exchange.getContext(), exchange, producer.getEndpoint());
            }
            // let the prepared process it, remember to begin the exchange pair
            AsyncProcessor async = AsyncProcessorConverterHelper.convert(processor);
            pair.begin();
            sync = async.process(exchange, new AsyncCallback() {
                public void done(boolean doneSync) {
                    // we are done with the exchange pair
                    pair.done();

                    // okay we are done, so notify the exchange was sent
                    if (producer != null) {
                        long timeTaken = watch.stop();
                        Endpoint endpoint = producer.getEndpoint();
                        // emit event that the exchange was sent to the endpoint
                        EventHelper.notifyExchangeSent(exchange.getContext(), exchange, endpoint, timeTaken);
                    }

                    // we only have to handle async completion of the routing slip
                    if (doneSync) {
                        return;
                    }

                    // continue processing the multicast asynchronously
                    Exchange subExchange = exchange;

                    // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                    // remember to test for stop on exception and aggregate before copying back results
                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
                    if (stopOnException && !continueProcessing) {
                        if (subExchange.getException() != null) {
                            // wrap in exception to explain where it failed
                            subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, subExchange.getException()));
                        } else {
                            // we want to stop on exception, and the exception was handled by the error handler
                            // this is similar to what the pipeline does, so we should do the same to not surprise end users
                            // so we should set the failed exchange as the result and be done
                            result.set(subExchange);
                        }
                        // and do the done work
                        doDone(original, subExchange, pairs, callback, false, true);
                        return;
                    }

                    // must catch any exceptions from aggregation
                    try {
                        if (parallelAggregate) {
                            doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
                        } else {
                            doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                        }
                    } catch (Throwable e) {
                        // wrap in exception to explain where it failed
                        subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, e));
                        // and do the done work
                        doDone(original, subExchange, pairs, callback, false, true);
                        return;
                    }

                    total.incrementAndGet();

                    // maybe there are more processors to multicast
                    while (it.hasNext()) {

                        // prepare and run the next
                        ProcessorExchangePair pair = it.next();
                        subExchange = pair.getExchange();
                        updateNewExchange(subExchange, total.get(), pairs, it);
                        boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);

                        if (!sync) {
                            // the nested call installed its own callback which continues from here
                            LOG.trace("Processing exchangeId: {} is continued being processed asynchronously", original.getExchangeId());
                            return;
                        }

                        // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                        // remember to test for stop on exception and aggregate before copying back results
                        continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
                        if (stopOnException && !continueProcessing) {
                            if (subExchange.getException() != null) {
                                // wrap in exception to explain where it failed
                                subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, subExchange.getException()));
                            } else {
                                // we want to stop on exception, and the exception was handled by the error handler
                                // this is similar to what the pipeline does, so we should do the same to not surprise end users
                                // so we should set the failed exchange as the result and be done
                                result.set(subExchange);
                            }
                            // and do the done work
                            doDone(original, subExchange, pairs, callback, false, true);
                            return;
                        }

                        // must catch any exceptions from aggregation
                        try {
                            if (parallelAggregate) {
                                doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
                            } else {
                                doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                            }
                        } catch (Throwable e) {
                            // wrap in exception to explain where it failed
                            subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, e));
                            // and do the done work
                            doDone(original, subExchange, pairs, callback, false, true);
                            return;
                        }

                        total.incrementAndGet();
                    }

                    // do the done work
                    subExchange = result.get() != null ? result.get() : null;
                    doDone(original, subExchange, pairs, callback, false, true);
                }
            });
        } finally {
            // pop the block so by next round we have the same staring point and thus the tracing looks accurate
            if (traced != null) {
                traced.popBlock();
            }
        }

        return sync;
    }

    /**
     * Processes a single exchange pair synchronously; used by the parallel worker tasks.
     */
    private void doProcessParallel(final ProcessorExchangePair pair) throws Exception {
        final Exchange exchange = pair.getExchange();
        Processor processor = pair.getProcessor();
        Producer producer = pair.getProducer();

        TracedRouteNodes traced = exchange.getUnitOfWork() != null ?
exchange.getUnitOfWork().getTracedRouteNodes() : null; 832 833 // compute time taken if sending to another endpoint 834 StopWatch watch = null; 835 if (producer != null) { 836 watch = new StopWatch(); 837 } 838 839 try { 840 // prepare tracing starting from a new block 841 if (traced != null) { 842 traced.pushBlock(); 843 } 844 845 if (producer != null) { 846 EventHelper.notifyExchangeSending(exchange.getContext(), exchange, producer.getEndpoint()); 847 } 848 // let the prepared process it, remember to begin the exchange pair 849 AsyncProcessor async = AsyncProcessorConverterHelper.convert(processor); 850 pair.begin(); 851 // we invoke it synchronously as parallel async routing is too hard 852 AsyncProcessorHelper.process(async, exchange); 853 } finally { 854 pair.done(); 855 // pop the block so by next round we have the same staring point and thus the tracing looks accurate 856 if (traced != null) { 857 traced.popBlock(); 858 } 859 if (producer != null) { 860 long timeTaken = watch.stop(); 861 Endpoint endpoint = producer.getEndpoint(); 862 // emit event that the exchange was sent to the endpoint 863 // this is okay to do here in the finally block, as the processing is not using the async routing engine 864 //( we invoke it synchronously as parallel async routing is too hard) 865 EventHelper.notifyExchangeSent(exchange.getContext(), exchange, endpoint, timeTaken); 866 } 867 } 868 } 869 870 /** 871 * Common work which must be done when we are done multicasting. 872 * <p/> 873 * This logic applies for both running synchronous and asynchronous as there are multiple exist points 874 * when using the asynchronous routing engine. And therefore we want the logic in one method instead 875 * of being scattered. 
     *
     * @param original the original exchange
     * @param subExchange the current sub exchange, can be <tt>null</tt> for the synchronous part
     * @param pairs the pairs with the exchanges to process
     * @param callback the callback
     * @param doneSync the <tt>doneSync</tt> parameter to call on callback
     * @param forceExhaust whether or not error handling is exhausted
     */
    protected void doDone(Exchange original, Exchange subExchange, final Iterable<ProcessorExchangePair> pairs,
                          AsyncCallback callback, boolean doneSync, boolean forceExhaust) {

        // we are done so close the pairs iterator
        if (pairs != null && pairs instanceof Closeable) {
            IOHelper.close((Closeable) pairs, "pairs", LOG);
        }

        AggregationStrategy strategy = getAggregationStrategy(subExchange);
        // unwrap delegates so we can detect completion-aware strategies underneath
        if (strategy instanceof DelegateAggregationStrategy) {
            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
        }
        // invoke the on completion callback
        if (strategy instanceof CompletionAwareAggregationStrategy) {
            ((CompletionAwareAggregationStrategy) strategy).onCompletion(subExchange);
        }

        // cleanup any per exchange aggregation strategy
        removeAggregationStrategyFromExchange(original);

        // we need to know if there was an exception, and if the stopOnException option was enabled
        // also we would need to know if any error handler has attempted redelivery and exhausted
        boolean stoppedOnException = false;
        boolean exception = false;
        boolean exhaust = forceExhaust || subExchange != null && (subExchange.getException() != null || ExchangeHelper.isRedeliveryExhausted(subExchange));
        if (original.getException() != null || subExchange != null && subExchange.getException() != null) {
            // there was an exception and we stopped
            stoppedOnException = isStopOnException();
            exception = true;
        }

        // must copy results at this point
        if (subExchange != null) {
            if (stoppedOnException) {
                // if we stopped due to an exception then only propagate the exception
                original.setException(subExchange.getException());
            } else {
                // copy the current result to original so it will contain this result of this eip
                ExchangeHelper.copyResults(original, subExchange);
            }
        }

        // .. and then if there was an exception we need to configure the redelivery exhaust
        // for example the noErrorHandler will not cause redelivery exhaust so if this error
        // handler has been in use, then the exhaust would be false (if not forced)
        if (exception) {
            // multicast uses error handling on its output processors and they have tried to redeliver
            // so we shall signal back to the other error handlers that we are exhausted and they should not
            // also try to redeliver as we will then do that twice
            original.setProperty(Exchange.REDELIVERY_EXHAUSTED, exhaust);
        }

        callback.done(doneSync);
    }

    /**
     * Aggregate the {@link Exchange} with the current result.
     * This method is synchronized and is called directly when parallelAggregate is disabled (by default).
     *
     * @param strategy the aggregation strategy to use
     * @param result the current result
     * @param exchange the exchange to be added to the result
     * @see #doAggregateInternal(org.apache.camel.processor.aggregate.AggregationStrategy, org.apache.camel.util.concurrent.AtomicExchange, org.apache.camel.Exchange)
     */
    protected synchronized void doAggregate(AggregationStrategy strategy, AtomicExchange result, Exchange exchange) {
        doAggregateInternal(strategy, result, exchange);
    }

    /**
     * Aggregate the {@link Exchange} with the current result.
     * This method is unsynchronized and is called directly when parallelAggregate is enabled.
955 * In all other cases, this method is called from the doAggregate which is a synchronized method 956 * 957 * @param strategy the aggregation strategy to use 958 * @param result the current result 959 * @param exchange the exchange to be added to the result 960 * @see #doAggregate(org.apache.camel.processor.aggregate.AggregationStrategy, org.apache.camel.util.concurrent.AtomicExchange, org.apache.camel.Exchange) 961 */ 962 protected void doAggregateInternal(AggregationStrategy strategy, AtomicExchange result, Exchange exchange) { 963 if (strategy != null) { 964 // prepare the exchanges for aggregation 965 Exchange oldExchange = result.get(); 966 ExchangeHelper.prepareAggregation(oldExchange, exchange); 967 result.set(strategy.aggregate(oldExchange, exchange)); 968 } 969 } 970 971 protected void updateNewExchange(Exchange exchange, int index, Iterable<ProcessorExchangePair> allPairs, 972 Iterator<ProcessorExchangePair> it) { 973 exchange.setProperty(Exchange.MULTICAST_INDEX, index); 974 if (it.hasNext()) { 975 exchange.setProperty(Exchange.MULTICAST_COMPLETE, Boolean.FALSE); 976 } else { 977 exchange.setProperty(Exchange.MULTICAST_COMPLETE, Boolean.TRUE); 978 } 979 } 980 981 protected Integer getExchangeIndex(Exchange exchange) { 982 return exchange.getProperty(Exchange.MULTICAST_INDEX, Integer.class); 983 } 984 985 protected Iterable<ProcessorExchangePair> createProcessorExchangePairs(Exchange exchange) throws Exception { 986 List<ProcessorExchangePair> result = new ArrayList<ProcessorExchangePair>(processors.size()); 987 988 StreamCache streamCache = null; 989 if (isParallelProcessing() && exchange.getIn().getBody() instanceof StreamCache) { 990 // in parallel processing case, the stream must be copied, therefore get the stream 991 streamCache = (StreamCache) exchange.getIn().getBody(); 992 } 993 994 int index = 0; 995 for (Processor processor : processors) { 996 // copy exchange, and do not share the unit of work 997 Exchange copy = 
ExchangeHelper.createCorrelatedCopy(exchange, false); 998 999 if (streamCache != null) { 1000 if (index > 0) { 1001 // copy it otherwise parallel processing is not possible, 1002 // because streams can only be read once 1003 StreamCache copiedStreamCache = streamCache.copy(copy); 1004 if (copiedStreamCache != null) { 1005 copy.getIn().setBody(copiedStreamCache); 1006 } 1007 } 1008 } 1009 1010 // If the multi-cast processor has an aggregation strategy 1011 // then the StreamCache created by the child routes must not be 1012 // closed by the unit of work of the child route, but by the unit of 1013 // work of the parent route or grand parent route or grand grand parent route ...(in case of nesting). 1014 // Set therefore the unit of work of the parent route as stream cache unit of work, 1015 // if it is not already set. 1016 if (copy.getProperty(Exchange.STREAM_CACHE_UNIT_OF_WORK) == null) { 1017 copy.setProperty(Exchange.STREAM_CACHE_UNIT_OF_WORK, exchange.getUnitOfWork()); 1018 } 1019 // if we share unit of work, we need to prepare the child exchange 1020 if (isShareUnitOfWork()) { 1021 prepareSharedUnitOfWork(copy, exchange); 1022 } 1023 1024 // and add the pair 1025 RouteContext routeContext = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getRouteContext() : null; 1026 result.add(createProcessorExchangePair(index++, processor, copy, routeContext)); 1027 } 1028 1029 if (exchange.getException() != null) { 1030 // force any exceptions occurred during creation of exchange paris to be thrown 1031 // before returning the answer; 1032 throw exchange.getException(); 1033 } 1034 1035 return result; 1036 } 1037 1038 /** 1039 * Creates the {@link ProcessorExchangePair} which holds the processor and exchange to be send out. 1040 * <p/> 1041 * You <b>must</b> use this method to create the instances of {@link ProcessorExchangePair} as they 1042 * need to be specially prepared before use. 
1043 * 1044 * @param index the index 1045 * @param processor the processor 1046 * @param exchange the exchange 1047 * @param routeContext the route context 1048 * @return prepared for use 1049 */ 1050 protected ProcessorExchangePair createProcessorExchangePair(int index, Processor processor, Exchange exchange, 1051 RouteContext routeContext) { 1052 Processor prepared = processor; 1053 1054 // set property which endpoint we send to 1055 setToEndpoint(exchange, prepared); 1056 1057 // rework error handling to support fine grained error handling 1058 prepared = createErrorHandler(routeContext, exchange, prepared); 1059 1060 // invoke on prepare on the exchange if specified 1061 if (onPrepare != null) { 1062 try { 1063 onPrepare.process(exchange); 1064 } catch (Exception e) { 1065 exchange.setException(e); 1066 } 1067 } 1068 return new DefaultProcessorExchangePair(index, processor, prepared, exchange); 1069 } 1070 1071 protected Processor createErrorHandler(RouteContext routeContext, Exchange exchange, Processor processor) { 1072 Processor answer; 1073 1074 boolean tryBlock = exchange.getProperty(Exchange.TRY_ROUTE_BLOCK, false, boolean.class); 1075 1076 // do not wrap in error handler if we are inside a try block 1077 if (!tryBlock && routeContext != null) { 1078 // wrap the producer in error handler so we have fine grained error handling on 1079 // the output side instead of the input side 1080 // this is needed to support redelivery on that output alone and not doing redelivery 1081 // for the entire multicast block again which will start from scratch again 1082 1083 // create key for cache 1084 final PreparedErrorHandler key = new PreparedErrorHandler(routeContext, processor); 1085 1086 // lookup cached first to reuse and preserve memory 1087 answer = errorHandlers.get(key); 1088 if (answer != null) { 1089 LOG.trace("Using existing error handler for: {}", processor); 1090 return answer; 1091 } 1092 1093 LOG.trace("Creating error handler for: {}", processor); 1094 
ErrorHandlerFactory builder = routeContext.getRoute().getErrorHandlerBuilder(); 1095 // create error handler (create error handler directly to keep it light weight, 1096 // instead of using ProcessorDefinition.wrapInErrorHandler) 1097 try { 1098 processor = builder.createErrorHandler(routeContext, processor); 1099 1100 // and wrap in unit of work processor so the copy exchange also can run under UoW 1101 answer = createUnitOfWorkProcessor(routeContext, processor, exchange); 1102 1103 boolean child = exchange.getProperty(Exchange.PARENT_UNIT_OF_WORK, UnitOfWork.class) != null; 1104 1105 // must start the error handler 1106 ServiceHelper.startServices(answer); 1107 1108 // here we don't cache the child unit of work 1109 if (!child) { 1110 // add to cache 1111 errorHandlers.putIfAbsent(key, answer); 1112 } 1113 1114 } catch (Exception e) { 1115 throw ObjectHelper.wrapRuntimeCamelException(e); 1116 } 1117 } else { 1118 // and wrap in unit of work processor so the copy exchange also can run under UoW 1119 answer = createUnitOfWorkProcessor(routeContext, processor, exchange); 1120 } 1121 1122 return answer; 1123 } 1124 1125 /** 1126 * Strategy to create the unit of work to be used for the sub route 1127 * 1128 * @param routeContext the route context 1129 * @param processor the processor 1130 * @param exchange the exchange 1131 * @return the unit of work processor 1132 */ 1133 protected Processor createUnitOfWorkProcessor(RouteContext routeContext, Processor processor, Exchange exchange) { 1134 CamelInternalProcessor internal = new CamelInternalProcessor(processor); 1135 1136 // and wrap it in a unit of work so the UoW is on the top, so the entire route will be in the same UoW 1137 UnitOfWork parent = exchange.getProperty(Exchange.PARENT_UNIT_OF_WORK, UnitOfWork.class); 1138 if (parent != null) { 1139 internal.addAdvice(new CamelInternalProcessor.ChildUnitOfWorkProcessorAdvice(routeContext, parent)); 1140 } else { 1141 internal.addAdvice(new 
CamelInternalProcessor.UnitOfWorkProcessorAdvice(routeContext)); 1142 } 1143 1144 return internal; 1145 } 1146 1147 /** 1148 * Prepares the exchange for participating in a shared unit of work 1149 * <p/> 1150 * This ensures a child exchange can access its parent {@link UnitOfWork} when it participate 1151 * in a shared unit of work. 1152 * 1153 * @param childExchange the child exchange 1154 * @param parentExchange the parent exchange 1155 */ 1156 protected void prepareSharedUnitOfWork(Exchange childExchange, Exchange parentExchange) { 1157 childExchange.setProperty(Exchange.PARENT_UNIT_OF_WORK, parentExchange.getUnitOfWork()); 1158 } 1159 1160 protected void doStart() throws Exception { 1161 if (isParallelProcessing() && executorService == null) { 1162 throw new IllegalArgumentException("ParallelProcessing is enabled but ExecutorService has not been set"); 1163 } 1164 if (timeout > 0 && !isParallelProcessing()) { 1165 throw new IllegalArgumentException("Timeout is used but ParallelProcessing has not been enabled"); 1166 } 1167 if (isParallelProcessing() && aggregateExecutorService == null) { 1168 // use unbounded thread pool so we ensure the aggregate on-the-fly task always will have assigned a thread 1169 // and run the tasks when the task is submitted. If not then the aggregate task may not be able to run 1170 // and signal completion during processing, which would lead to what would appear as a dead-lock or a slow processing 1171 String name = getClass().getSimpleName() + "-AggregateTask"; 1172 aggregateExecutorService = createAggregateExecutorService(name); 1173 } 1174 if (aggregationStrategy instanceof CamelContextAware) { 1175 ((CamelContextAware) aggregationStrategy).setCamelContext(camelContext); 1176 } 1177 1178 ServiceHelper.startServices(aggregationStrategy, processors); 1179 } 1180 1181 /** 1182 * Strategy to create the thread pool for the aggregator background task which waits for and aggregates 1183 * completed tasks when running in parallel mode. 
1184 * 1185 * @param name the suggested name for the background thread 1186 * @return the thread pool 1187 */ 1188 protected synchronized ExecutorService createAggregateExecutorService(String name) { 1189 // use a cached thread pool so we each on-the-fly task has a dedicated thread to process completions as they come in 1190 return camelContext.getExecutorServiceManager().newCachedThreadPool(this, name); 1191 } 1192 1193 @Override 1194 protected void doStop() throws Exception { 1195 ServiceHelper.stopServices(processors, errorHandlers, aggregationStrategy); 1196 } 1197 1198 @Override 1199 protected void doShutdown() throws Exception { 1200 ServiceHelper.stopAndShutdownServices(processors, errorHandlers, aggregationStrategy); 1201 // only clear error handlers when shutting down 1202 errorHandlers.clear(); 1203 1204 if (shutdownExecutorService && executorService != null) { 1205 getCamelContext().getExecutorServiceManager().shutdownNow(executorService); 1206 } 1207 if (aggregateExecutorService != null) { 1208 getCamelContext().getExecutorServiceManager().shutdownNow(aggregateExecutorService); 1209 } 1210 } 1211 1212 protected static void setToEndpoint(Exchange exchange, Processor processor) { 1213 if (processor instanceof Producer) { 1214 Producer producer = (Producer) processor; 1215 exchange.setProperty(Exchange.TO_ENDPOINT, producer.getEndpoint().getEndpointUri()); 1216 } 1217 } 1218 1219 protected AggregationStrategy getAggregationStrategy(Exchange exchange) { 1220 AggregationStrategy answer = null; 1221 1222 // prefer to use per Exchange aggregation strategy over a global strategy 1223 if (exchange != null) { 1224 Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class); 1225 Map<Object, AggregationStrategy> map = CastUtils.cast(property); 1226 if (map != null) { 1227 answer = map.get(this); 1228 } 1229 } 1230 if (answer == null) { 1231 // fallback to global strategy 1232 answer = getAggregationStrategy(); 1233 } 1234 return answer; 
1235 } 1236 1237 /** 1238 * Sets the given {@link org.apache.camel.processor.aggregate.AggregationStrategy} on the {@link Exchange}. 1239 * 1240 * @param exchange the exchange 1241 * @param aggregationStrategy the strategy 1242 */ 1243 protected void setAggregationStrategyOnExchange(Exchange exchange, AggregationStrategy aggregationStrategy) { 1244 Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class); 1245 Map<Object, AggregationStrategy> map = CastUtils.cast(property); 1246 if (map == null) { 1247 map = new ConcurrentHashMap<Object, AggregationStrategy>(); 1248 } else { 1249 // it is not safe to use the map directly as the exchange doesn't have the deep copy of it's properties 1250 // we just create a new copy if we need to change the map 1251 map = new ConcurrentHashMap<Object, AggregationStrategy>(map); 1252 } 1253 // store the strategy using this processor as the key 1254 // (so we can store multiple strategies on the same exchange) 1255 map.put(this, aggregationStrategy); 1256 exchange.setProperty(Exchange.AGGREGATION_STRATEGY, map); 1257 } 1258 1259 /** 1260 * Removes the associated {@link org.apache.camel.processor.aggregate.AggregationStrategy} from the {@link Exchange} 1261 * which must be done after use. 1262 * 1263 * @param exchange the current exchange 1264 */ 1265 protected void removeAggregationStrategyFromExchange(Exchange exchange) { 1266 Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class); 1267 Map<Object, AggregationStrategy> map = CastUtils.cast(property); 1268 if (map == null) { 1269 return; 1270 } 1271 // remove the strategy using this processor as the key 1272 map.remove(this); 1273 } 1274 1275 /** 1276 * Is the multicast processor working in streaming mode? 
1277 * <p/> 1278 * In streaming mode: 1279 * <ul> 1280 * <li>we use {@link Iterable} to ensure we can send messages as soon as the data becomes available</li> 1281 * <li>for parallel processing, we start aggregating responses as they get send back to the processor; 1282 * this means the {@link org.apache.camel.processor.aggregate.AggregationStrategy} has to take care of handling out-of-order arrival of exchanges</li> 1283 * </ul> 1284 */ 1285 public boolean isStreaming() { 1286 return streaming; 1287 } 1288 1289 /** 1290 * Should the multicast processor stop processing further exchanges in case of an exception occurred? 1291 */ 1292 public boolean isStopOnException() { 1293 return stopOnException; 1294 } 1295 1296 /** 1297 * Returns the producers to multicast to 1298 */ 1299 public Collection<Processor> getProcessors() { 1300 return processors; 1301 } 1302 1303 /** 1304 * An optional timeout in millis when using parallel processing 1305 */ 1306 public long getTimeout() { 1307 return timeout; 1308 } 1309 1310 /** 1311 * Use {@link #getAggregationStrategy(org.apache.camel.Exchange)} instead. 1312 */ 1313 public AggregationStrategy getAggregationStrategy() { 1314 return aggregationStrategy; 1315 } 1316 1317 public boolean isParallelProcessing() { 1318 return parallelProcessing; 1319 } 1320 1321 public boolean isParallelAggregate() { 1322 return parallelAggregate; 1323 } 1324 1325 public boolean isStopOnAggregateException() { 1326 return stopOnAggregateException; 1327 } 1328 1329 public boolean isShareUnitOfWork() { 1330 return shareUnitOfWork; 1331 } 1332 1333 public List<Processor> next() { 1334 if (!hasNext()) { 1335 return null; 1336 } 1337 return new ArrayList<Processor>(processors); 1338 } 1339 1340 public boolean hasNext() { 1341 return processors != null && !processors.isEmpty(); 1342 } 1343}