001/** 002 * Licensed to the Apache Software Foundation (ASF) under one or more 003 * contributor license agreements. See the NOTICE file distributed with 004 * this work for additional information regarding copyright ownership. 005 * The ASF licenses this file to You under the Apache License, Version 2.0 006 * (the "License"); you may not use this file except in compliance with 007 * the License. You may obtain a copy of the License at 008 * 009 * http://www.apache.org/licenses/LICENSE-2.0 010 * 011 * Unless required by applicable law or agreed to in writing, software 012 * distributed under the License is distributed on an "AS IS" BASIS, 013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 014 * See the License for the specific language governing permissions and 015 * limitations under the License. 016 */ 017package org.apache.camel.processor.aggregate; 018 019import java.util.ArrayList; 020import java.util.Collections; 021import java.util.LinkedHashSet; 022import java.util.List; 023import java.util.Map; 024import java.util.Set; 025import java.util.concurrent.ConcurrentHashMap; 026import java.util.concurrent.ConcurrentSkipListSet; 027import java.util.concurrent.ExecutorService; 028import java.util.concurrent.ScheduledExecutorService; 029import java.util.concurrent.TimeUnit; 030import java.util.concurrent.atomic.AtomicBoolean; 031import java.util.concurrent.atomic.AtomicInteger; 032import java.util.concurrent.atomic.AtomicLong; 033import java.util.concurrent.locks.Lock; 034import java.util.concurrent.locks.ReentrantLock; 035 036import org.apache.camel.AsyncCallback; 037import org.apache.camel.AsyncProcessor; 038import org.apache.camel.CamelContext; 039import org.apache.camel.CamelContextAware; 040import org.apache.camel.CamelExchangeException; 041import org.apache.camel.Endpoint; 042import org.apache.camel.Exchange; 043import org.apache.camel.Expression; 044import org.apache.camel.Navigate; 045import 
org.apache.camel.NoSuchEndpointException; 046import org.apache.camel.Predicate; 047import org.apache.camel.Processor; 048import org.apache.camel.ProducerTemplate; 049import org.apache.camel.ShutdownRunningTask; 050import org.apache.camel.TimeoutMap; 051import org.apache.camel.Traceable; 052import org.apache.camel.spi.AggregationRepository; 053import org.apache.camel.spi.ExceptionHandler; 054import org.apache.camel.spi.IdAware; 055import org.apache.camel.spi.OptimisticLockingAggregationRepository; 056import org.apache.camel.spi.RecoverableAggregationRepository; 057import org.apache.camel.spi.ShutdownAware; 058import org.apache.camel.spi.ShutdownPrepared; 059import org.apache.camel.spi.Synchronization; 060import org.apache.camel.support.DefaultTimeoutMap; 061import org.apache.camel.support.LoggingExceptionHandler; 062import org.apache.camel.support.ServiceSupport; 063import org.apache.camel.util.AsyncProcessorHelper; 064import org.apache.camel.util.ExchangeHelper; 065import org.apache.camel.util.LRUCacheFactory; 066import org.apache.camel.util.ObjectHelper; 067import org.apache.camel.util.ServiceHelper; 068import org.apache.camel.util.StopWatch; 069import org.apache.camel.util.TimeUtils; 070import org.slf4j.Logger; 071import org.slf4j.LoggerFactory; 072 073/** 074 * An implementation of the <a 075 * href="http://camel.apache.org/aggregator2.html">Aggregator</a> 076 * pattern where a batch of messages are processed (up to a maximum amount or 077 * until some timeout is reached) and messages for the same correlation key are 078 * combined together using some kind of {@link AggregationStrategy} 079 * (by default the latest message is used) to compress many message exchanges 080 * into a smaller number of exchanges. 
 * <p/>
 * A good example of this is stock market data; you may be receiving 30,000
 * messages/second and you may want to throttle it right down so that multiple
 * messages for the same stock are combined (or just the latest message is used
 * and older prices are discarded). Another idea is to combine line item messages
 * together into a single invoice message.
 */
public class AggregateProcessor extends ServiceSupport implements AsyncProcessor, Navigate<Processor>, Traceable, ShutdownPrepared, ShutdownAware, IdAware {

    public static final String AGGREGATE_TIMEOUT_CHECKER = "AggregateTimeoutChecker";

    private static final Logger LOG = LoggerFactory.getLogger(AggregateProcessor.class);

    // guards non-optimistic aggregation so the same correlation key is never aggregated in parallel
    private final Lock lock = new ReentrantLock();
    // ensures the RecoverableAggregationRepository misuse warning is logged at most once
    private final AtomicBoolean aggregateRepositoryWarned = new AtomicBoolean();
    private final CamelContext camelContext;
    // the downstream processor that receives completed (aggregated) exchanges
    private final Processor processor;
    private String id;
    private AggregationStrategy aggregationStrategy;
    private boolean preCompletion;
    private Expression correlationExpression;
    private AggregateController aggregateController;
    // executor used to send out completed exchanges asynchronously
    private final ExecutorService executorService;
    private final boolean shutdownExecutorService;
    private OptimisticLockRetryPolicy optimisticLockRetryPolicy = new OptimisticLockRetryPolicy();
    private ScheduledExecutorService timeoutCheckerExecutorService;
    private boolean shutdownTimeoutCheckerExecutorService;
    private ScheduledExecutorService recoverService;
    // store correlation key -> exchange id in timeout map
    private TimeoutMap<String, String> timeoutMap;
    private ExceptionHandler exceptionHandler;
    private AggregationRepository aggregationRepository;
    // correlation key -> key; used as a set when closeCorrelationKeyOnCompletion is enabled
    private Map<String, String> closedCorrelationKeys;
    private final Set<String> batchConsumerCorrelationKeys = new ConcurrentSkipListSet<String>();
    private final Set<String> inProgressCompleteExchanges =
Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
    // exchange id -> redelivery bookkeeping for recovered exchanges
    private final Map<String, RedeliveryData> redeliveryState = new ConcurrentHashMap<String, RedeliveryData>();

    private final AggregateProcessorStatistics statistics = new Statistics();
    private final AtomicLong totalIn = new AtomicLong();
    private final AtomicLong totalCompleted = new AtomicLong();
    private final AtomicLong completedBySize = new AtomicLong();
    private final AtomicLong completedByStrategy = new AtomicLong();
    private final AtomicLong completedByInterval = new AtomicLong();
    private final AtomicLong completedByTimeout = new AtomicLong();
    private final AtomicLong completedByPredicate = new AtomicLong();
    private final AtomicLong completedByBatchConsumer = new AtomicLong();
    private final AtomicLong completedByForce = new AtomicLong();

    // keep booking about redelivery
    private class RedeliveryData {
        int redeliveryCounter;
    }

    /**
     * Exposes the aggregator counters (held as {@link AtomicLong} fields on the
     * enclosing processor) through the {@link AggregateProcessorStatistics} contract.
     */
    private class Statistics implements AggregateProcessorStatistics {

        private boolean statisticsEnabled = true;

        public long getTotalIn() {
            return totalIn.get();
        }

        public long getTotalCompleted() {
            return totalCompleted.get();
        }

        public long getCompletedBySize() {
            return completedBySize.get();
        }

        public long getCompletedByStrategy() {
            return completedByStrategy.get();
        }

        public long getCompletedByInterval() {
            return completedByInterval.get();
        }

        public long getCompletedByTimeout() {
            return completedByTimeout.get();
        }

        public long getCompletedByPredicate() {
            return completedByPredicate.get();
        }

        public long getCompletedByBatchConsumer() {
            return completedByBatchConsumer.get();
        }

        public long getCompletedByForce() {
            return completedByForce.get();
        }

        public void reset() {
            totalIn.set(0);
            totalCompleted.set(0);
            completedBySize.set(0);
            completedByStrategy.set(0);
            // FIX: completedByInterval was previously omitted here, so interval-triggered
            // completions survived a reset() while every other counter was cleared
            completedByInterval.set(0);
            completedByTimeout.set(0);
            completedByPredicate.set(0);
            completedByBatchConsumer.set(0);
            completedByForce.set(0);
        }

        public boolean isStatisticsEnabled() {
            return statisticsEnabled;
        }

        public void setStatisticsEnabled(boolean statisticsEnabled) {
            this.statisticsEnabled = statisticsEnabled;
        }
    }

    // options
    private boolean ignoreInvalidCorrelationKeys;
    private Integer closeCorrelationKeyOnCompletion;
    private boolean parallelProcessing;
    private boolean optimisticLocking;

    // different ways to have completion triggered
    private boolean eagerCheckCompletion;
    private Predicate completionPredicate;
    private long completionTimeout;
    private Expression completionTimeoutExpression;
    private long completionInterval;
    private int completionSize;
    private Expression completionSizeExpression;
    private boolean completionFromBatchConsumer;
    private AtomicInteger batchConsumerCounter = new AtomicInteger();
    private boolean discardOnCompletionTimeout;
    private boolean forceCompletionOnStop;
    private boolean completeAllOnStop;
    // how often (millis) the background timeout checker task runs
    private long completionTimeoutCheckerInterval = 1000;

    private ProducerTemplate deadLetterProducerTemplate;

    /**
     * Creates the aggregate processor.
     *
     * @param camelContext             the camel context (mandatory)
     * @param processor                the downstream processor that receives completed exchanges (mandatory)
     * @param correlationExpression    expression evaluated per exchange to compute the correlation key (mandatory)
     * @param aggregationStrategy      strategy used to merge exchanges with the same key (mandatory)
     * @param executorService          executor used to send out completed exchanges (mandatory)
     * @param shutdownExecutorService  whether this processor owns (and must shut down) the executor
     */
    public AggregateProcessor(CamelContext camelContext, Processor processor,
                              Expression correlationExpression, AggregationStrategy aggregationStrategy,
                              ExecutorService executorService, boolean shutdownExecutorService) {
        ObjectHelper.notNull(camelContext, "camelContext");
        ObjectHelper.notNull(processor, "processor");
        ObjectHelper.notNull(correlationExpression, "correlationExpression");
        ObjectHelper.notNull(aggregationStrategy, "aggregationStrategy");
        ObjectHelper.notNull(executorService, "executorService");
        this.camelContext = camelContext;
        this.processor = processor;
        this.correlationExpression = correlationExpression;
        this.aggregationStrategy =
aggregationStrategy;
        this.executorService = executorService;
        this.shutdownExecutorService = shutdownExecutorService;
        this.exceptionHandler = new LoggingExceptionHandler(camelContext, getClass());
    }

    @Override
    public String toString() {
        return "AggregateProcessor[to: " + processor + "]";
    }

    public String getTraceLabel() {
        return "aggregate[" + correlationExpression + "]";
    }

    public List<Processor> next() {
        if (!hasNext()) {
            return null;
        }
        List<Processor> answer = new ArrayList<Processor>(1);
        answer.add(processor);
        return answer;
    }

    public boolean hasNext() {
        return processor != null;
    }

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public void process(Exchange exchange) throws Exception {
        AsyncProcessorHelper.process(this, exchange);
    }

    // always completes synchronously: the aggregation itself happens on the caller thread,
    // only the sending of completed exchanges is handed off to the executor
    public boolean process(Exchange exchange, AsyncCallback callback) {
        try {
            doProcess(exchange);
        } catch (Throwable e) {
            exchange.setException(e);
        }
        callback.done(true);
        return true;
    }

    /**
     * Aggregates the incoming exchange: computes the correlation key, merges the exchange
     * into the group via the {@link AggregationStrategy}, and sends out any groups that
     * became complete as a result.
     *
     * @param exchange the incoming exchange
     * @throws Exception if the correlation key is invalid (and not ignored), the key is
     *                   closed, optimistic-lock retries are exhausted, or aggregation fails
     */
    protected void doProcess(Exchange exchange) throws Exception {

        if (getStatistics().isStatisticsEnabled()) {
            totalIn.incrementAndGet();
        }

        //check for the special header to force completion of all groups (and ignore the exchange otherwise)
        boolean completeAllGroups = exchange.getIn().getHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS, false, boolean.class);
        if (completeAllGroups) {
            forceCompletionOfAllGroups();
            return;
        }

        // compute correlation expression
        String key = correlationExpression.evaluate(exchange, String.class);
        if (ObjectHelper.isEmpty(key)) {
            // we have a bad correlation key
            if (isIgnoreInvalidCorrelationKeys()) {
                LOG.debug("Invalid correlation key. This Exchange will be ignored: {}", exchange);
                return;
            } else {
                throw new CamelExchangeException("Invalid correlation key", exchange);
            }
        }

        // is the correlation key closed?
        if (closedCorrelationKeys != null && closedCorrelationKeys.containsKey(key)) {
            throw new ClosedCorrelationKeyException(key, exchange);
        }

        // when optimist locking is enabled we keep trying until we succeed
        if (optimisticLocking) {
            List<Exchange> aggregated = null;
            boolean exhaustedRetries = true;
            int attempt = 0;
            do {
                attempt++;
                // copy exchange, and do not share the unit of work
                // the aggregated output runs in another unit of work
                Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);
                try {
                    aggregated = doAggregation(key, copy);
                    exhaustedRetries = false;
                    break;
                } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
                    LOG.trace("On attempt {} OptimisticLockingAggregationRepository: {} threw OptimisticLockingException while trying to add() key: {} and exchange: {}",
                            new Object[]{attempt, aggregationRepository, key, copy, e});
                    // back off per the configured retry policy before trying again
                    optimisticLockRetryPolicy.doDelay(attempt);
                }
            } while (optimisticLockRetryPolicy.shouldRetry(attempt));

            if (exhaustedRetries) {
                throw new CamelExchangeException("Exhausted optimistic locking retry attempts, tried " + attempt + " times", exchange,
                        new OptimisticLockingAggregationRepository.OptimisticLockingException());
            } else if (aggregated != null) {
                // we are completed so submit to completion
                for (Exchange agg : aggregated) {
                    onSubmitCompletion(key, agg);
                }
            }
        } else {
            // copy exchange, and do not share the unit of work
            // the aggregated output runs in another unit of work
            Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);

            // when memory based then its fast using synchronized, but if the aggregation repository is IO
            // bound such as JPA etc then concurrent aggregation per correlation key could
            // improve performance as we can run aggregation repository get/add in parallel
            List<Exchange> aggregated = null;
            lock.lock();
            try {
                aggregated = doAggregation(key, copy);

            } finally {
                lock.unlock();
            }
            // we are completed so do that work outside the lock
            if (aggregated != null) {
                for (Exchange agg : aggregated) {
                    onSubmitCompletion(key, agg);
                }
            }
        }

        // check for the special header to force completion of all groups (inclusive of the message)
        boolean completeAllGroupsInclusive = exchange.getIn().getHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS_INCLUSIVE, false, boolean.class);
        if (completeAllGroupsInclusive) {
            forceCompletionOfAllGroups();
        }
    }

    /**
     * Aggregates the exchange with the given correlation key
     * <p/>
     * This method <b>must</b> be run synchronized as we cannot aggregate the same correlation key
     * in parallel.
     * <p/>
     * The returned {@link Exchange} should be sent downstream using the {@link #onSubmitCompletion(String, org.apache.camel.Exchange)}
     * method which sends out the aggregated and completed {@link Exchange}.
     * @param key the correlation key
     * @param newExchange the exchange
     * @return the aggregated exchange(s) which is complete, or <tt>null</tt> if not yet complete
     * @throws org.apache.camel.CamelExchangeException is thrown if error aggregating
     */
    private List<Exchange> doAggregation(String key, Exchange newExchange) throws CamelExchangeException {
        LOG.trace("onAggregation +++ start +++ with correlation key: {}", key);

        List<Exchange> list = new ArrayList<Exchange>();
        // non-null when a completion condition fired; holds the trigger name ("size", "timeout", ...)
        String complete = null;

        Exchange answer;
        Exchange originalExchange = aggregationRepository.get(newExchange.getContext(), key);
        Exchange oldExchange = originalExchange;

        Integer size = 1;
        if (oldExchange != null) {
            // hack to support legacy AggregationStrategy's that modify and return the oldExchange, these will not
            // working when using an identify based approach for optimistic locking like the MemoryAggregationRepository.
            if (optimisticLocking && aggregationRepository instanceof MemoryAggregationRepository) {
                oldExchange = originalExchange.copy();
            }
            size = oldExchange.getProperty(Exchange.AGGREGATED_SIZE, 0, Integer.class);
            size++;
        }

        // prepare the exchanges for aggregation
        ExchangeHelper.prepareAggregation(oldExchange, newExchange);

        // check if we are pre complete
        if (preCompletion) {
            try {
                // put the current aggregated size on the exchange so its avail during completion check
                newExchange.setProperty(Exchange.AGGREGATED_SIZE, size);
                complete = isPreCompleted(key, oldExchange, newExchange);
                // make sure to track timeouts if not complete
                if (complete == null) {
                    trackTimeout(key, newExchange);
                }
                // remove it afterwards
                newExchange.removeProperty(Exchange.AGGREGATED_SIZE);
            } catch (Throwable e) {
                // must catch any exception from aggregation
                throw new CamelExchangeException("Error occurred during preComplete", newExchange, e);
            }
        } else if (isEagerCheckCompletion()) {
            // put the current aggregated size on the exchange so its avail during completion check
            newExchange.setProperty(Exchange.AGGREGATED_SIZE, size);
            complete = isCompleted(key, newExchange);
            // make sure to track timeouts if not complete
            if (complete == null) {
                trackTimeout(key, newExchange);
            }
            // remove it afterwards
            newExchange.removeProperty(Exchange.AGGREGATED_SIZE);
        }

        if (preCompletion && complete != null) {
            // need to pre complete the current group before we aggregate
            doAggregationComplete(complete, list, key, originalExchange, oldExchange);
            // as we complete the current group eager, we should indicate the new group is not complete
            complete = null;
            // and clear old/original exchange as we start on a new group
            oldExchange = null;
            originalExchange = null;
            // and reset the size to 1
            size = 1;
            // make sure to track timeout as we just restart the correlation group when we are in pre completion mode
            trackTimeout(key, newExchange);
        }

        // aggregate the exchanges
        try {
            answer = onAggregation(oldExchange, newExchange);
        } catch (Throwable e) {
            // must catch any exception from aggregation
            throw new CamelExchangeException("Error occurred during aggregation", newExchange, e);
        }
        if (answer == null) {
            throw new CamelExchangeException("AggregationStrategy " + aggregationStrategy + " returned null which is not allowed", newExchange);
        }

        // special for some repository implementations
        if (aggregationRepository instanceof RecoverableAggregationRepository) {
            boolean valid = oldExchange == null || answer.getExchangeId().equals(oldExchange.getExchangeId());
            // warn only once (compareAndSet) to avoid flooding the log
            if (!valid && aggregateRepositoryWarned.compareAndSet(false, true)) {
                LOG.warn("AggregationStrategy should return the oldExchange instance instead of the newExchange whenever possible"
                        + " as otherwise this can lead to unexpected behavior with some RecoverableAggregationRepository implementations");
            }
        }

        // update the aggregated size
        answer.setProperty(Exchange.AGGREGATED_SIZE, size);

        // maybe we should check completion after the aggregation
        if (!preCompletion && !isEagerCheckCompletion()) {
            complete = isCompleted(key, answer);
            // make sure to track timeouts if not complete
            if (complete == null) {
                trackTimeout(key, newExchange);
            }
        }

        if (complete == null) {
            // only need to update aggregation repository if we are not complete
            doAggregationRepositoryAdd(newExchange.getContext(), key, originalExchange, answer);
        } else {
            // if we are complete then add the answer to the list
            doAggregationComplete(complete, list, key, originalExchange, answer);
        }

        LOG.trace("onAggregation +++ end +++ with correlation key: {}", key);
        return list;
    }

    /**
     * Completes the group(s) for the given trigger, adding each completed exchange to the list.
     * When the trigger is "consumer" (batch consumer) all tracked batch keys are completed,
     * otherwise just the current group.
     */
    protected void doAggregationComplete(String complete, List<Exchange> list, String key, Exchange originalExchange, Exchange answer) {
        if ("consumer".equals(complete)) {
            for (String batchKey : batchConsumerCorrelationKeys) {
                Exchange batchAnswer;
                if (batchKey.equals(key)) {
                    // skip the current aggregated key as we have already aggregated it and have the answer
                    batchAnswer = answer;
                } else {
                    batchAnswer = aggregationRepository.get(camelContext, batchKey);
                }

                if (batchAnswer != null) {
                    batchAnswer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, complete);
                    onCompletion(batchKey, originalExchange, batchAnswer, false);
                    list.add(batchAnswer);
                }
            }
            batchConsumerCorrelationKeys.clear();
            // we have already submitted to completion, so answer should be null
            answer = null;
        } else if (answer != null) {
            // we are complete for this exchange
            answer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, complete);
            answer = onCompletion(key, originalExchange, answer, false);
        }

        if
(answer != null) {
            list.add(answer);
        }
    }

    /**
     * Stores the in-progress aggregate in the repository; with optimistic locking the
     * add may fail with {@link OptimisticLockingAggregationRepository.OptimisticLockingException},
     * which is rethrown after notifying the strategy.
     */
    protected void doAggregationRepositoryAdd(CamelContext camelContext, String key, Exchange oldExchange, Exchange newExchange) {
        LOG.trace("In progress aggregated oldExchange: {}, newExchange: {} with correlation key: {}", new Object[]{oldExchange, newExchange, key});
        if (optimisticLocking) {
            try {
                ((OptimisticLockingAggregationRepository)aggregationRepository).add(camelContext, key, oldExchange, newExchange);
            } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
                onOptimisticLockingFailure(oldExchange, newExchange);
                throw e;
            }
        } else {
            aggregationRepository.add(camelContext, key, newExchange);
        }
    }

    // notify the strategy (unwrapping any delegate) that an optimistic-lock add failed
    protected void onOptimisticLockingFailure(Exchange oldExchange, Exchange newExchange) {
        AggregationStrategy strategy = aggregationStrategy;
        if (strategy instanceof DelegateAggregationStrategy) {
            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
        }
        if (strategy instanceof OptimisticLockingAwareAggregationStrategy) {
            LOG.trace("onOptimisticLockFailure with AggregationStrategy: {}, oldExchange: {}, newExchange: {}",
                    new Object[]{strategy, oldExchange, newExchange});
            ((OptimisticLockingAwareAggregationStrategy)strategy).onOptimisticLockFailure(oldExchange, newExchange);
        }
    }

    /**
     * Tests whether the given exchanges is pre-complete or not
     *
     * @param key the correlation key
     * @param oldExchange the existing exchange
     * @param newExchange the incoming exchange
     * @return <tt>null</tt> if not pre-completed, otherwise a String with the type that triggered the pre-completion
     */
    protected String isPreCompleted(String key, Exchange oldExchange, Exchange newExchange) {
        boolean complete = false;
        AggregationStrategy strategy = aggregationStrategy;
        if (strategy instanceof DelegateAggregationStrategy) {
            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
        }
        if (strategy instanceof PreCompletionAwareAggregationStrategy) {
            complete = ((PreCompletionAwareAggregationStrategy) strategy).preComplete(oldExchange, newExchange);
        }
        return complete ? "strategy" : null;
    }

    /**
     * Tests whether the given exchange is complete or not
     *
     * @param key the correlation key
     * @param exchange the incoming exchange
     * @return <tt>null</tt> if not completed, otherwise a String with the type that triggered the completion
     */
    protected String isCompleted(String key, Exchange exchange) {
        // batch consumer completion must always run first
        if (isCompletionFromBatchConsumer()) {
            batchConsumerCorrelationKeys.add(key);
            batchConsumerCounter.incrementAndGet();
            int size = exchange.getProperty(Exchange.BATCH_SIZE, 0, Integer.class);
            if (size > 0 && batchConsumerCounter.intValue() >= size) {
                // batch consumer is complete then reset the counter
                batchConsumerCounter.set(0);
                return "consumer";
            }
        }

        if (exchange.getProperty(Exchange.AGGREGATION_COMPLETE_CURRENT_GROUP, false, boolean.class)) {
            return "strategy";
        }

        if (getCompletionPredicate() != null) {
            boolean answer = getCompletionPredicate().matches(exchange);
            if (answer) {
                return "predicate";
            }
        }

        boolean sizeChecked = false;
        if (getCompletionSizeExpression() != null) {
            Integer value = getCompletionSizeExpression().evaluate(exchange, Integer.class);
            if (value != null && value > 0) {
                // mark as already checked size as expression takes precedence over static configured
                sizeChecked = true;
                int size = exchange.getProperty(Exchange.AGGREGATED_SIZE, 1, Integer.class);
                if (size >= value) {
                    return "size";
                }
            }
        }
        if (!sizeChecked && getCompletionSize() > 0) {
            int size = exchange.getProperty(Exchange.AGGREGATED_SIZE, 1, Integer.class);
            if (size >= getCompletionSize()) {
                return
"size";
            }
        }

        // not complete
        return null;
    }

    /**
     * Registers (or refreshes) the timeout for the correlation group; the expression-based
     * timeout takes precedence over the fixed {@code completionTimeout} value.
     */
    protected void trackTimeout(String key, Exchange exchange) {
        // timeout can be either evaluated based on an expression or from a fixed value
        // expression takes precedence
        boolean timeoutSet = false;
        if (getCompletionTimeoutExpression() != null) {
            Long value = getCompletionTimeoutExpression().evaluate(exchange, Long.class);
            if (value != null && value > 0) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Updating correlation key {} to timeout after {} ms. as exchange received: {}",
                            new Object[]{key, value, exchange});
                }
                addExchangeToTimeoutMap(key, exchange, value);
                timeoutSet = true;
            }
        }
        if (!timeoutSet && getCompletionTimeout() > 0) {
            // timeout is used so use the timeout map to keep an eye on this
            if (LOG.isTraceEnabled()) {
                LOG.trace("Updating correlation key {} to timeout after {} ms. as exchange received: {}",
                        new Object[]{key, getCompletionTimeout(), exchange});
            }
            addExchangeToTimeoutMap(key, exchange, getCompletionTimeout());
        }
    }

    // delegate the actual merge of two exchanges to the configured strategy
    protected Exchange onAggregation(Exchange oldExchange, Exchange newExchange) {
        return aggregationStrategy.aggregate(oldExchange, newExchange);
    }

    // ask the (possibly delegated) strategy whether the group should pre-complete
    protected boolean onPreCompletionAggregation(Exchange oldExchange, Exchange newExchange) {
        AggregationStrategy strategy = aggregationStrategy;
        if (strategy instanceof DelegateAggregationStrategy) {
            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
        }
        if (strategy instanceof PreCompletionAwareAggregationStrategy) {
            return ((PreCompletionAwareAggregationStrategy) strategy).preComplete(oldExchange, newExchange);
        }
        return false;
    }

    /**
     * Performs the bookkeeping when a group completes: removes it from the repository and
     * timeout map, closes the key if configured, and invokes timeout-aware strategies.
     *
     * @param key the correlation key
     * @param original the previously stored exchange for the group, or <tt>null</tt> if this was the first
     * @param aggregated the completed aggregated exchange
     * @param fromTimeout whether the completion was triggered by the timeout checker
     * @return the exchange to send downstream, or <tt>null</tt> if it was discarded (timeout + discardOnCompletionTimeout)
     */
    protected Exchange onCompletion(final String key, final Exchange original, final Exchange aggregated, boolean fromTimeout) {
        // store the correlation key as property before we remove so the repository has that information
        if (original != null) {
            original.setProperty(Exchange.AGGREGATED_CORRELATION_KEY, key);
        }
        aggregated.setProperty(Exchange.AGGREGATED_CORRELATION_KEY, key);

        // only remove if we have previous added (as we could potentially complete with only 1 exchange)
        // (if we have previous added then we have that as the original exchange)
        if (original != null) {
            // remove from repository as its completed, we do this first as to trigger any OptimisticLockingException's
            aggregationRepository.remove(aggregated.getContext(), key, original);
        }

        if (!fromTimeout && timeoutMap != null) {
            // cleanup timeout map if it was a incoming exchange which triggered the timeout (and not the timeout checker)
            LOG.trace("Removing correlation key {} from timeout", key);
            timeoutMap.remove(key);
        }

        // this key has been closed so add it to the closed map
        if (closedCorrelationKeys != null) {
            closedCorrelationKeys.put(key, key);
        }

        if (fromTimeout) {
            // invoke timeout if its timeout aware aggregation strategy,
            // to allow any custom processing before discarding the exchange
            AggregationStrategy strategy = aggregationStrategy;
            if (strategy instanceof DelegateAggregationStrategy) {
                strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
            }
            if (strategy instanceof TimeoutAwareAggregationStrategy) {
                long timeout = getCompletionTimeout() > 0 ? getCompletionTimeout() : -1;
                ((TimeoutAwareAggregationStrategy) strategy).timeout(aggregated, -1, -1, timeout);
            }
        }

        Exchange answer;
        if (fromTimeout && isDiscardOnCompletionTimeout()) {
            // discard due timeout
            LOG.debug("Aggregation for correlation key {} discarding aggregated exchange: {}", key, aggregated);
            // must confirm the discarded exchange
            aggregationRepository.confirm(aggregated.getContext(), aggregated.getExchangeId());
            // and remove redelivery state as well
            redeliveryState.remove(aggregated.getExchangeId());
            // the completion was from timeout and we should just discard it
            answer = null;
        } else {
            // the aggregated exchange should be published (sent out)
            answer = aggregated;
        }

        return answer;
    }

    /**
     * Sends a completed aggregated exchange downstream via the executor service, updating
     * statistics and tracking it as in-progress until its unit of work completes.
     */
    private void onSubmitCompletion(final String key, final Exchange exchange) {
        LOG.debug("Aggregation complete for correlation key {} sending aggregated exchange: {}", key, exchange);

        // add this as in progress before we submit the task
        inProgressCompleteExchanges.add(exchange.getExchangeId());

        // invoke the on completion callback
        AggregationStrategy target = aggregationStrategy;
        if (target instanceof DelegateAggregationStrategy) {
            target = ((DelegateAggregationStrategy) target).getDelegate();
        }
        if (target instanceof CompletionAwareAggregationStrategy) {
            ((CompletionAwareAggregationStrategy) target).onCompletion(exchange);
        }

        if (getStatistics().isStatisticsEnabled()) {
            totalCompleted.incrementAndGet();

            String completedBy = exchange.getProperty(Exchange.AGGREGATED_COMPLETED_BY, String.class);
            if ("interval".equals(completedBy)) {
                completedByInterval.incrementAndGet();
            } else if ("timeout".equals(completedBy)) {
                completedByTimeout.incrementAndGet();
            } else if ("force".equals(completedBy)) {
                completedByForce.incrementAndGet();
            } else if ("consumer".equals(completedBy)) {
completedByBatchConsumer.incrementAndGet(); 747 } else if ("predicate".equals(completedBy)) { 748 completedByPredicate.incrementAndGet(); 749 } else if ("size".equals(completedBy)) { 750 completedBySize.incrementAndGet(); 751 } else if ("strategy".equals(completedBy)) { 752 completedByStrategy.incrementAndGet(); 753 } 754 } 755 756 // send this exchange 757 executorService.submit(new Runnable() { 758 public void run() { 759 LOG.debug("Processing aggregated exchange: {}", exchange); 760 761 // add on completion task so we remember to update the inProgressCompleteExchanges 762 exchange.addOnCompletion(new AggregateOnCompletion(exchange.getExchangeId())); 763 764 try { 765 processor.process(exchange); 766 } catch (Throwable e) { 767 exchange.setException(e); 768 } 769 770 // log exception if there was a problem 771 if (exchange.getException() != null) { 772 // if there was an exception then let the exception handler handle it 773 getExceptionHandler().handleException("Error processing aggregated exchange", exchange, exchange.getException()); 774 } else { 775 LOG.trace("Processing aggregated exchange: {} complete.", exchange); 776 } 777 } 778 }); 779 } 780 781 /** 782 * Restores the timeout map with timeout values from the aggregation repository. 783 * <p/> 784 * This is needed in case the aggregator has been stopped and started again (for example a server restart). 785 * Then the existing exchanges from the {@link AggregationRepository} must have their timeout conditions restored. 
786 */ 787 protected void restoreTimeoutMapFromAggregationRepository() throws Exception { 788 // grab the timeout value for each partly aggregated exchange 789 Set<String> keys = aggregationRepository.getKeys(); 790 if (keys == null || keys.isEmpty()) { 791 return; 792 } 793 794 StopWatch watch = new StopWatch(); 795 LOG.trace("Starting restoring CompletionTimeout for {} existing exchanges from the aggregation repository...", keys.size()); 796 797 for (String key : keys) { 798 Exchange exchange = aggregationRepository.get(camelContext, key); 799 // grab the timeout value 800 long timeout = exchange.hasProperties() ? exchange.getProperty(Exchange.AGGREGATED_TIMEOUT, 0, long.class) : 0; 801 if (timeout > 0) { 802 LOG.trace("Restoring CompletionTimeout for exchangeId: {} with timeout: {} millis.", exchange.getExchangeId(), timeout); 803 addExchangeToTimeoutMap(key, exchange, timeout); 804 } 805 } 806 807 // log duration of this task so end user can see how long it takes to pre-check this upon starting 808 LOG.info("Restored {} CompletionTimeout conditions in the AggregationTimeoutChecker in {}", 809 timeoutMap.size(), TimeUtils.printDuration(watch.taken())); 810 } 811 812 /** 813 * Adds the given exchange to the timeout map, which is used by the timeout checker task to trigger timeouts. 
     *
     * @param key the correlation key
     * @param exchange the exchange
     * @param timeout the timeout value in millis
     */
    private void addExchangeToTimeoutMap(String key, Exchange exchange, long timeout) {
        // store the timeout value on the exchange as well, in case we need it later
        exchange.setProperty(Exchange.AGGREGATED_TIMEOUT, timeout);
        // the map is keyed by correlation key and holds the exchange id; the timeout
        // checker task evicts the entry when the timeout elapses
        timeoutMap.put(key, exchange.getExchangeId(), timeout);
    }

    /**
     * Current number of closed correlation keys in the memory cache
     */
    public int getClosedCorrelationKeysCacheSize() {
        if (closedCorrelationKeys != null) {
            return closedCorrelationKeys.size();
        } else {
            // the cache is only created when closeCorrelationKeyOnCompletion is configured (see doStart)
            return 0;
        }
    }

    /**
     * Clear all the closed correlation keys stored in the cache
     */
    public void clearClosedCorrelationKeysCache() {
        if (closedCorrelationKeys != null) {
            closedCorrelationKeys.clear();
        }
    }

    public AggregateProcessorStatistics getStatistics() {
        return statistics;
    }

    /**
     * Number of completed exchanges which are currently in progress of being routed.
     */
    public int getInProgressCompleteExchanges() {
        return inProgressCompleteExchanges.size();
    }

    // -----------------------------------------------------------------------
    // configuration getters and setters
    // -----------------------------------------------------------------------

    public Predicate getCompletionPredicate() {
        return completionPredicate;
    }

    public void setCompletionPredicate(Predicate completionPredicate) {
        this.completionPredicate = completionPredicate;
    }

    public boolean isEagerCheckCompletion() {
        return eagerCheckCompletion;
    }

    public void setEagerCheckCompletion(boolean eagerCheckCompletion) {
        this.eagerCheckCompletion = eagerCheckCompletion;
    }

    public long getCompletionTimeout() {
        return completionTimeout;
    }

    public void setCompletionTimeout(long completionTimeout) {
        this.completionTimeout = completionTimeout;
    }

    public Expression getCompletionTimeoutExpression() {
        return completionTimeoutExpression;
    }

    public void setCompletionTimeoutExpression(Expression completionTimeoutExpression) {
        this.completionTimeoutExpression = completionTimeoutExpression;
    }

    public long getCompletionInterval() {
        return completionInterval;
    }

    public void setCompletionInterval(long completionInterval) {
        this.completionInterval = completionInterval;
    }

    public int getCompletionSize() {
        return completionSize;
    }

    public void setCompletionSize(int completionSize) {
        this.completionSize = completionSize;
    }

    public Expression getCompletionSizeExpression() {
        return completionSizeExpression;
    }

    public void setCompletionSizeExpression(Expression completionSizeExpression) {
        this.completionSizeExpression = completionSizeExpression;
    }

    public boolean isIgnoreInvalidCorrelationKeys() {
        return ignoreInvalidCorrelationKeys;
    }

    public void setIgnoreInvalidCorrelationKeys(boolean ignoreInvalidCorrelationKeys) {
        this.ignoreInvalidCorrelationKeys = ignoreInvalidCorrelationKeys;
    }

    public Integer getCloseCorrelationKeyOnCompletion() {
        return closeCorrelationKeyOnCompletion;
    }

    public void setCloseCorrelationKeyOnCompletion(Integer closeCorrelationKeyOnCompletion) {
        this.closeCorrelationKeyOnCompletion = closeCorrelationKeyOnCompletion;
    }

    public boolean isCompletionFromBatchConsumer() {
        return completionFromBatchConsumer;
    }

    public void setCompletionFromBatchConsumer(boolean completionFromBatchConsumer) {
        this.completionFromBatchConsumer = completionFromBatchConsumer;
    }

    public boolean isCompleteAllOnStop() {
        return completeAllOnStop;
    }

    public long getCompletionTimeoutCheckerInterval() {
        return completionTimeoutCheckerInterval;
    }

    public void setCompletionTimeoutCheckerInterval(long completionTimeoutCheckerInterval) {
        this.completionTimeoutCheckerInterval = completionTimeoutCheckerInterval;
    }

    public ExceptionHandler getExceptionHandler() {
        return exceptionHandler;
    }

    public void setExceptionHandler(ExceptionHandler exceptionHandler) {
        this.exceptionHandler = exceptionHandler;
    }

    public boolean isParallelProcessing() {
        return parallelProcessing;
    }

    public void setParallelProcessing(boolean parallelProcessing) {
        this.parallelProcessing = parallelProcessing;
    }

    public boolean isOptimisticLocking() {
        return optimisticLocking;
    }

    public void setOptimisticLocking(boolean optimisticLocking) {
        this.optimisticLocking = optimisticLocking;
    }

    public AggregationRepository getAggregationRepository() {
        return aggregationRepository;
    }

    public void setAggregationRepository(AggregationRepository aggregationRepository) {
        this.aggregationRepository = aggregationRepository;
    }

    public boolean isDiscardOnCompletionTimeout() {
        return discardOnCompletionTimeout;
    }

    public void setDiscardOnCompletionTimeout(boolean discardOnCompletionTimeout) {
        this.discardOnCompletionTimeout = discardOnCompletionTimeout;
    }

    public void setForceCompletionOnStop(boolean forceCompletionOnStop) {
        this.forceCompletionOnStop = forceCompletionOnStop;
    }

    public void setCompleteAllOnStop(boolean completeAllOnStop) {
        this.completeAllOnStop = completeAllOnStop;
    }

    public void setTimeoutCheckerExecutorService(ScheduledExecutorService timeoutCheckerExecutorService) {
        this.timeoutCheckerExecutorService = timeoutCheckerExecutorService;
    }

    public ScheduledExecutorService getTimeoutCheckerExecutorService() {
        return timeoutCheckerExecutorService;
    }

    // whether this processor owns (and thus must shutdown) the timeout checker executor;
    // set to true in doStart when the executor is created on demand
    public boolean isShutdownTimeoutCheckerExecutorService() {
        return shutdownTimeoutCheckerExecutorService;
    }

    public void setShutdownTimeoutCheckerExecutorService(boolean shutdownTimeoutCheckerExecutorService) {
        this.shutdownTimeoutCheckerExecutorService = shutdownTimeoutCheckerExecutorService;
    }

    public void setOptimisticLockRetryPolicy(OptimisticLockRetryPolicy optimisticLockRetryPolicy) {
        this.optimisticLockRetryPolicy = optimisticLockRetryPolicy;
    }

    public OptimisticLockRetryPolicy getOptimisticLockRetryPolicy() {
        return optimisticLockRetryPolicy;
    }

    public AggregationStrategy getAggregationStrategy() {
        return aggregationStrategy;
    }

    public void setAggregationStrategy(AggregationStrategy aggregationStrategy) {
        this.aggregationStrategy = aggregationStrategy;
    }

    public Expression getCorrelationExpression() {
        return correlationExpression;
    }

    public void setCorrelationExpression(Expression correlationExpression) {
        this.correlationExpression = correlationExpression;
    }

    public AggregateController getAggregateController() {
        return aggregateController;
    }

    public void setAggregateController(AggregateController aggregateController) {
        this.aggregateController = aggregateController;
    }

    /**
     * On completion task which keeps the booking of the in progress up to date
     */
    private final class AggregateOnCompletion implements Synchronization {
        // id of the aggregated exchange, captured when this task is created
        private final String exchangeId;

        private AggregateOnCompletion(String exchangeId) {
            // must use the original exchange id as it could potentially change if send over SEDA etc.
            this.exchangeId = exchangeId;
        }

        public void onFailure(Exchange exchange) {
            LOG.trace("Aggregated exchange onFailure: {}", exchange);

            // must remember to remove in progress when we failed
            inProgressCompleteExchanges.remove(exchangeId);
            // do not remove redelivery state as we need it when we redeliver again later
        }

        public void onComplete(Exchange exchange) {
            LOG.trace("Aggregated exchange onComplete: {}", exchange);

            // only confirm if we processed without a problem
            try {
                aggregationRepository.confirm(exchange.getContext(), exchangeId);
                // and remove redelivery state as well
                redeliveryState.remove(exchangeId);
            } finally {
                // must remember to remove in progress when we are complete
                // (finally guarantees removal even if confirm throws)
                inProgressCompleteExchanges.remove(exchangeId);
            }
        }

        @Override
        public String toString() {
            return "AggregateOnCompletion";
        }
    }

    /**
     * Background task that looks for aggregated exchanges which is triggered by completion timeouts.
1082 */ 1083 private final class AggregationTimeoutMap extends DefaultTimeoutMap<String, String> { 1084 1085 private AggregationTimeoutMap(ScheduledExecutorService executor, long requestMapPollTimeMillis) { 1086 // do NOT use locking on the timeout map as this aggregator has its own shared lock we will use instead 1087 super(executor, requestMapPollTimeMillis, optimisticLocking); 1088 } 1089 1090 @Override 1091 public void purge() { 1092 // must acquire the shared aggregation lock to be able to purge 1093 if (!optimisticLocking) { 1094 lock.lock(); 1095 } 1096 try { 1097 super.purge(); 1098 } finally { 1099 if (!optimisticLocking) { 1100 lock.unlock(); 1101 } 1102 } 1103 } 1104 1105 @Override 1106 public boolean onEviction(String key, String exchangeId) { 1107 log.debug("Completion timeout triggered for correlation key: {}", key); 1108 1109 boolean inProgress = inProgressCompleteExchanges.contains(exchangeId); 1110 if (inProgress) { 1111 LOG.trace("Aggregated exchange with id: {} is already in progress.", exchangeId); 1112 return true; 1113 } 1114 1115 // get the aggregated exchange 1116 boolean evictionStolen = false; 1117 Exchange answer = aggregationRepository.get(camelContext, key); 1118 if (answer == null) { 1119 evictionStolen = true; 1120 } else { 1121 // indicate it was completed by timeout 1122 answer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "timeout"); 1123 try { 1124 answer = onCompletion(key, answer, answer, true); 1125 if (answer != null) { 1126 onSubmitCompletion(key, answer); 1127 } 1128 } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) { 1129 evictionStolen = true; 1130 } 1131 } 1132 1133 if (optimisticLocking && evictionStolen) { 1134 LOG.debug("Another Camel instance has already successfully correlated or processed this timeout eviction " 1135 + "for exchange with id: {} and correlation id: {}", exchangeId, key); 1136 } 1137 return true; 1138 } 1139 } 1140 1141 /** 1142 * Background task that triggers completion 
     * based on interval.
     */
    private final class AggregationIntervalTask implements Runnable {

        public void run() {
            // only run if CamelContext has been fully started
            if (!camelContext.getStatus().isStarted()) {
                LOG.trace("Completion interval task cannot start due CamelContext({}) has not been started yet", camelContext.getName());
                return;
            }

            LOG.trace("Starting completion interval task");

            // trigger completion for all in the repository
            Set<String> keys = aggregationRepository.getKeys();

            if (keys != null && !keys.isEmpty()) {
                // must acquire the shared aggregation lock to be able to trigger interval completion
                if (!optimisticLocking) {
                    lock.lock();
                }
                try {
                    for (String key : keys) {
                        boolean stolenInterval = false;
                        Exchange exchange = aggregationRepository.get(camelContext, key);
                        if (exchange == null) {
                            // group vanished between getKeys() and get() - another instance
                            // (or thread) already completed it
                            stolenInterval = true;
                        } else {
                            LOG.trace("Completion interval triggered for correlation key: {}", key);
                            // indicate it was completed by interval
                            exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "interval");
                            try {
                                Exchange answer = onCompletion(key, exchange, exchange, false);
                                if (answer != null) {
                                    onSubmitCompletion(key, answer);
                                }
                            } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
                                stolenInterval = true;
                            }
                        }
                        if (optimisticLocking && stolenInterval) {
                            LOG.debug("Another Camel instance has already processed this interval aggregation for exchange with correlation id: {}", key);
                        }
                    }
                } finally {
                    if (!optimisticLocking) {
                        lock.unlock();
                    }
                }
            }

            LOG.trace("Completion interval task complete");
        }
    }

    /**
     * Background task that looks for aggregated exchanges to recover.
     */
    private final class RecoverTask implements Runnable {
        private final RecoverableAggregationRepository recoverable;

        private RecoverTask(RecoverableAggregationRepository recoverable) {
            this.recoverable = recoverable;
        }

        public void run() {
            // only run if CamelContext has been fully started
            if (!camelContext.getStatus().isStarted()) {
                LOG.trace("Recover check cannot start due CamelContext({}) has not been started yet", camelContext.getName());
                return;
            }

            LOG.trace("Starting recover check");

            // copy the current in progress before doing scan
            final Set<String> copyOfInProgress = new LinkedHashSet<String>(inProgressCompleteExchanges);

            Set<String> exchangeIds = recoverable.scan(camelContext);
            for (String exchangeId : exchangeIds) {

                // we may shutdown while doing recovery
                if (!isRunAllowed()) {
                    LOG.info("We are shutting down so stop recovering");
                    return;
                }
                if (!optimisticLocking) {
                    lock.lock();
                }
                try {
                    // consider in progress if it was in progress before we did the scan, or currently after we did the scan
                    // its safer to consider it in progress than risk duplicates due both in progress + recovered
                    boolean inProgress = copyOfInProgress.contains(exchangeId) || inProgressCompleteExchanges.contains(exchangeId);
                    if (inProgress) {
                        LOG.trace("Aggregated exchange with id: {} is already in progress.", exchangeId);
                    } else {
                        LOG.debug("Loading aggregated exchange with id: {} to be recovered.", exchangeId);
                        Exchange exchange = recoverable.recover(camelContext, exchangeId);
                        if (exchange != null) {
                            // get the correlation key
                            String key = exchange.getProperty(Exchange.AGGREGATED_CORRELATION_KEY, String.class);
                            // and mark it as redelivered
                            exchange.getIn().setHeader(Exchange.REDELIVERED, Boolean.TRUE);

                            // get the current redelivery data
                            RedeliveryData data = redeliveryState.get(exchange.getExchangeId());

                            // if we are exhausted, then move to dead letter channel
                            if (data != null && recoverable.getMaximumRedeliveries() > 0 && data.redeliveryCounter >= recoverable.getMaximumRedeliveries()) {
                                LOG.warn("The recovered exchange is exhausted after " + recoverable.getMaximumRedeliveries()
                                        + " attempts, will now be moved to dead letter channel: " + recoverable.getDeadLetterUri());

                                // send to DLC
                                try {
                                    // set redelivery counter
                                    exchange.getIn().setHeader(Exchange.REDELIVERY_COUNTER, data.redeliveryCounter);
                                    exchange.getIn().setHeader(Exchange.REDELIVERY_EXHAUSTED, Boolean.TRUE);
                                    deadLetterProducerTemplate.send(recoverable.getDeadLetterUri(), exchange);
                                } catch (Throwable e) {
                                    exchange.setException(e);
                                }

                                // handle if failed
                                if (exchange.getException() != null) {
                                    getExceptionHandler().handleException("Failed to move recovered Exchange to dead letter channel: " + recoverable.getDeadLetterUri(), exchange.getException());
                                } else {
                                    // it was ok, so confirm after it has been moved to dead letter channel, so we wont recover it again
                                    recoverable.confirm(camelContext, exchangeId);
                                }
                            } else {
                                // update current redelivery state
                                if (data == null) {
                                    // create new data
                                    data = new RedeliveryData();
                                    redeliveryState.put(exchange.getExchangeId(), data);
                                }
                                data.redeliveryCounter++;

                                // set redelivery counter
                                exchange.getIn().setHeader(Exchange.REDELIVERY_COUNTER, data.redeliveryCounter);
                                if (recoverable.getMaximumRedeliveries() > 0) {
                                    exchange.getIn().setHeader(Exchange.REDELIVERY_MAX_COUNTER, recoverable.getMaximumRedeliveries());
                                }

                                LOG.debug("Delivery attempt: {} to recover aggregated exchange with id: {}", data.redeliveryCounter, exchangeId);

                                // not exhaust so resubmit the recovered exchange
                                onSubmitCompletion(key, exchange);
                            }
                        }
                    }
                } finally {
                    if (!optimisticLocking) {
                        lock.unlock();
                    }
                }
            }

            LOG.trace("Recover check complete");
        }
    }

    @Override
    @SuppressWarnings("unchecked")
    protected void doStart() throws Exception {
        AggregationStrategy strategy = aggregationStrategy;
        if (strategy instanceof DelegateAggregationStrategy) {
            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
        }
        if (strategy instanceof CamelContextAware) {
            ((CamelContextAware) strategy).setCamelContext(camelContext);
        }
        if (strategy instanceof PreCompletionAwareAggregationStrategy) {
            // pre-completion mode: the strategy decides completion before aggregation
            preCompletion = true;
            LOG.info("PreCompletionAwareAggregationStrategy detected. Aggregator {} is in pre-completion mode.", getId());
        }

        if (!preCompletion) {
            // if not in pre completion mode then check we configured the completion required
            if (getCompletionTimeout() <= 0 && getCompletionInterval() <= 0 && getCompletionSize() <= 0 && getCompletionPredicate() == null
                    && !isCompletionFromBatchConsumer() && getCompletionTimeoutExpression() == null
                    && getCompletionSizeExpression() == null) {
                throw new IllegalStateException("At least one of the completions options"
                        + " [completionTimeout, completionInterval, completionSize, completionPredicate, completionFromBatchConsumer] must be set");
            }
        }

        if (getCloseCorrelationKeyOnCompletion() != null) {
            if (getCloseCorrelationKeyOnCompletion() > 0) {
                // bounded cache of closed keys
                LOG.info("Using ClosedCorrelationKeys with a LRUCache with a capacity of " + getCloseCorrelationKeyOnCompletion());
                closedCorrelationKeys = LRUCacheFactory.newLRUCache(getCloseCorrelationKeyOnCompletion());
            } else {
                LOG.info("Using ClosedCorrelationKeys with unbounded capacity");
                closedCorrelationKeys = new ConcurrentHashMap<String, String>();
            }
        }

        if (aggregationRepository == null) {
            aggregationRepository = new
            MemoryAggregationRepository(optimisticLocking);
            LOG.info("Defaulting to MemoryAggregationRepository");
        }

        if (optimisticLocking) {
            if (!(aggregationRepository instanceof OptimisticLockingAggregationRepository)) {
                throw new IllegalArgumentException("Optimistic locking cannot be enabled without using an AggregationRepository that implements OptimisticLockingAggregationRepository");
            }
            LOG.info("Optimistic locking is enabled");
        }

        ServiceHelper.startServices(aggregationStrategy, processor, aggregationRepository);

        // should we use recover checker
        if (aggregationRepository instanceof RecoverableAggregationRepository) {
            RecoverableAggregationRepository recoverable = (RecoverableAggregationRepository) aggregationRepository;
            if (recoverable.isUseRecovery()) {
                long interval = recoverable.getRecoveryIntervalInMillis();
                if (interval <= 0) {
                    throw new IllegalArgumentException("AggregationRepository has recovery enabled and the RecoveryInterval option must be a positive number, was: " + interval);
                }

                // create a background recover thread to check every interval
                recoverService = camelContext.getExecutorServiceManager().newScheduledThreadPool(this, "AggregateRecoverChecker", 1);
                Runnable recoverTask = new RecoverTask(recoverable);
                LOG.info("Using RecoverableAggregationRepository by scheduling recover checker to run every " + interval + " millis.");
                // use fixed delay so there is X interval between each run
                recoverService.scheduleWithFixedDelay(recoverTask, 1000L, interval, TimeUnit.MILLISECONDS);

                if (recoverable.getDeadLetterUri() != null) {
                    int max = recoverable.getMaximumRedeliveries();
                    if (max <= 0) {
                        throw new IllegalArgumentException("Option maximumRedeliveries must be a positive number, was: " + max);
                    }
                    LOG.info("After " + max + " failed redelivery attempts Exchanges will be moved to deadLetterUri: " + recoverable.getDeadLetterUri());

                    // dead letter uri must be a valid endpoint
                    Endpoint endpoint = camelContext.getEndpoint(recoverable.getDeadLetterUri());
                    if (endpoint == null) {
                        throw new NoSuchEndpointException(recoverable.getDeadLetterUri());
                    }
                    deadLetterProducerTemplate = camelContext.createProducerTemplate();
                }
            }
        }

        // completionInterval and completionTimeout are mutually exclusive
        if (getCompletionInterval() > 0 && getCompletionTimeout() > 0) {
            throw new IllegalArgumentException("Only one of completionInterval or completionTimeout can be used, not both.");
        }
        if (getCompletionInterval() > 0) {
            LOG.info("Using CompletionInterval to run every " + getCompletionInterval() + " millis.");
            if (getTimeoutCheckerExecutorService() == null) {
                // we created the executor on demand, so we must also shut it down (see doShutdown)
                setTimeoutCheckerExecutorService(camelContext.getExecutorServiceManager().newScheduledThreadPool(this, AGGREGATE_TIMEOUT_CHECKER, 1));
                shutdownTimeoutCheckerExecutorService = true;
            }
            // trigger completion based on interval
            getTimeoutCheckerExecutorService().scheduleAtFixedRate(new AggregationIntervalTask(), getCompletionInterval(), getCompletionInterval(), TimeUnit.MILLISECONDS);
        }

        // start timeout service if its in use
        if (getCompletionTimeout() > 0 || getCompletionTimeoutExpression() != null) {
            LOG.info("Using CompletionTimeout to trigger after " + getCompletionTimeout() + " millis of inactivity.");
            if (getTimeoutCheckerExecutorService() == null) {
                setTimeoutCheckerExecutorService(camelContext.getExecutorServiceManager().newScheduledThreadPool(this, AGGREGATE_TIMEOUT_CHECKER, 1));
                shutdownTimeoutCheckerExecutorService = true;
            }
            // check for timed out aggregated messages once every second
            timeoutMap = new AggregationTimeoutMap(getTimeoutCheckerExecutorService(), getCompletionTimeoutCheckerInterval());
            // fill in existing timeout values from the aggregation repository, for example if a restart occurred, then we
            // need to re-establish the timeout map so timeout can trigger
            restoreTimeoutMapFromAggregationRepository();
            ServiceHelper.startService(timeoutMap);
        }

        if (aggregateController == null) {
            aggregateController = new DefaultAggregateController();
        }
        aggregateController.onStart(this);
    }

    @Override
    protected void doStop() throws Exception {
        // note: we cannot do doForceCompletionOnStop from this doStop method
        // as this is handled in the prepareShutdown method which is also invoked when stopping a route
        // and is better suited for preparing to shutdown than this doStop method is

        if (aggregateController != null) {
            aggregateController.onStop(this);
        }

        if (recoverService != null) {
            camelContext.getExecutorServiceManager().shutdown(recoverService);
        }
        ServiceHelper.stopServices(timeoutMap, processor, deadLetterProducerTemplate);

        if (closedCorrelationKeys != null) {
            // it may be a service so stop it as well
            ServiceHelper.stopService(closedCorrelationKeys);
            closedCorrelationKeys.clear();
        }
        batchConsumerCorrelationKeys.clear();
        redeliveryState.clear();
    }

    @Override
    public void prepareShutdown(boolean suspendOnly, boolean forced) {
        // we are shutting down, so force completion if this option was enabled
        // but only do this when forced=false, as that is when we have chance to
        // send out new messages to be routed by Camel.
When forced=true, then 1448 // we have to shutdown in a hurry 1449 if (!forced && forceCompletionOnStop) { 1450 doForceCompletionOnStop(); 1451 } 1452 } 1453 1454 @Override 1455 public boolean deferShutdown(ShutdownRunningTask shutdownRunningTask) { 1456 // not in use 1457 return true; 1458 } 1459 1460 @Override 1461 public int getPendingExchangesSize() { 1462 if (completeAllOnStop) { 1463 // we want to regard all pending exchanges in the repo as inflight 1464 Set<String> keys = getAggregationRepository().getKeys(); 1465 return keys != null ? keys.size() : 0; 1466 } else { 1467 return 0; 1468 } 1469 } 1470 1471 private void doForceCompletionOnStop() { 1472 int expected = forceCompletionOfAllGroups(); 1473 1474 StopWatch watch = new StopWatch(); 1475 while (inProgressCompleteExchanges.size() > 0) { 1476 LOG.trace("Waiting for {} inflight exchanges to complete", getInProgressCompleteExchanges()); 1477 try { 1478 Thread.sleep(100); 1479 } catch (InterruptedException e) { 1480 // break out as we got interrupted such as the JVM terminating 1481 LOG.warn("Interrupted while waiting for {} inflight exchanges to complete.", getInProgressCompleteExchanges()); 1482 break; 1483 } 1484 } 1485 1486 if (expected > 0) { 1487 LOG.info("Forcing completion of all groups with {} exchanges completed in {}", expected, TimeUtils.printDuration(watch.taken())); 1488 } 1489 } 1490 1491 @Override 1492 protected void doShutdown() throws Exception { 1493 // shutdown aggregation repository and the strategy 1494 ServiceHelper.stopAndShutdownServices(aggregationRepository, aggregationStrategy); 1495 1496 // cleanup when shutting down 1497 inProgressCompleteExchanges.clear(); 1498 1499 if (shutdownExecutorService) { 1500 camelContext.getExecutorServiceManager().shutdownNow(executorService); 1501 } 1502 if (shutdownTimeoutCheckerExecutorService) { 1503 camelContext.getExecutorServiceManager().shutdownNow(timeoutCheckerExecutorService); 1504 timeoutCheckerExecutorService = null; 1505 } 1506 1507 
        super.doShutdown();
    }

    /**
     * Forces completion of the group for the given correlation key.
     *
     * @param key the correlation key
     * @return the number of groups completed (0 or 1)
     */
    public int forceCompletionOfGroup(String key) {
        // must acquire the shared aggregation lock to be able to trigger force completion
        int total = 0;

        if (!optimisticLocking) {
            lock.lock();
        }
        try {
            Exchange exchange = aggregationRepository.get(camelContext, key);
            if (exchange != null) {
                total = 1;
                LOG.trace("Force completion triggered for correlation key: {}", key);
                // indicate it was completed by a force completion request
                exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "force");
                Exchange answer = onCompletion(key, exchange, exchange, false);
                if (answer != null) {
                    onSubmitCompletion(key, answer);
                }
            }
        } finally {
            if (!optimisticLocking) {
                lock.unlock();
            }
        }
        LOG.trace("Completed force completion of group {}", key);

        if (total > 0) {
            LOG.debug("Forcing completion of group {} with {} exchanges", key, total);
        }
        return total;
    }

    /**
     * Forces completion of all groups currently stored in the aggregation repository.
     *
     * @return the number of groups in the repository when the force completion was triggered
     */
    public int forceCompletionOfAllGroups() {

        // only run if CamelContext has been fully started or is stopping
        boolean allow = camelContext.getStatus().isStarted() || camelContext.getStatus().isStopping();
        if (!allow) {
            LOG.warn("Cannot start force completion of all groups because CamelContext({}) has not been started", camelContext.getName());
            return 0;
        }

        LOG.trace("Starting force completion of all groups task");

        // trigger completion for all in the repository
        Set<String> keys = aggregationRepository.getKeys();

        int total = 0;
        if (keys != null && !keys.isEmpty()) {
            // must acquire the shared aggregation lock to be able to trigger force completion
            if (!optimisticLocking) {
                lock.lock();
            }
            // NOTE(review): total counts all scanned keys, including any whose exchange
            // turns out to be null below (unlike forceCompletionOfGroup which only counts hits)
            total = keys.size();
            try {
                for (String key : keys) {
                    Exchange exchange = aggregationRepository.get(camelContext, key);
                    if (exchange != null) {
                        LOG.trace("Force completion triggered for correlation key: {}", key);
                        // indicate it was completed by a force completion request
                        exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "force");
                        Exchange answer = onCompletion(key, exchange, exchange, false);
                        if (answer != null) {
                            onSubmitCompletion(key, answer);
                        }
                    }
                }
            } finally {
                if (!optimisticLocking) {
                    lock.unlock();
                }
            }
        }
        LOG.trace("Completed force completion of all groups task");

        if (total > 0) {
            LOG.debug("Forcing completion of all groups with {} exchanges", total);
        }
        return total;
    }
}