001/** 002 * Licensed to the Apache Software Foundation (ASF) under one or more 003 * contributor license agreements. See the NOTICE file distributed with 004 * this work for additional information regarding copyright ownership. 005 * The ASF licenses this file to You under the Apache License, Version 2.0 006 * (the "License"); you may not use this file except in compliance with 007 * the License. You may obtain a copy of the License at 008 * 009 * http://www.apache.org/licenses/LICENSE-2.0 010 * 011 * Unless required by applicable law or agreed to in writing, software 012 * distributed under the License is distributed on an "AS IS" BASIS, 013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 014 * See the License for the specific language governing permissions and 015 * limitations under the License. 016 */ 017package org.apache.camel.processor.aggregate; 018 019import java.util.ArrayList; 020import java.util.Collections; 021import java.util.LinkedHashSet; 022import java.util.List; 023import java.util.Map; 024import java.util.Set; 025import java.util.concurrent.ConcurrentHashMap; 026import java.util.concurrent.ConcurrentSkipListSet; 027import java.util.concurrent.ExecutorService; 028import java.util.concurrent.ScheduledExecutorService; 029import java.util.concurrent.TimeUnit; 030import java.util.concurrent.atomic.AtomicBoolean; 031import java.util.concurrent.atomic.AtomicInteger; 032import java.util.concurrent.atomic.AtomicLong; 033import java.util.concurrent.locks.Lock; 034import java.util.concurrent.locks.ReentrantLock; 035 036import org.apache.camel.AsyncCallback; 037import org.apache.camel.AsyncProcessor; 038import org.apache.camel.CamelContext; 039import org.apache.camel.CamelContextAware; 040import org.apache.camel.CamelExchangeException; 041import org.apache.camel.Endpoint; 042import org.apache.camel.Exchange; 043import org.apache.camel.Expression; 044import org.apache.camel.Navigate; 045import 
org.apache.camel.NoSuchEndpointException; 046import org.apache.camel.Predicate; 047import org.apache.camel.Processor; 048import org.apache.camel.ProducerTemplate; 049import org.apache.camel.ShutdownRunningTask; 050import org.apache.camel.TimeoutMap; 051import org.apache.camel.Traceable; 052import org.apache.camel.spi.AggregationRepository; 053import org.apache.camel.spi.ExceptionHandler; 054import org.apache.camel.spi.IdAware; 055import org.apache.camel.spi.OptimisticLockingAggregationRepository; 056import org.apache.camel.spi.RecoverableAggregationRepository; 057import org.apache.camel.spi.ShutdownAware; 058import org.apache.camel.spi.ShutdownPrepared; 059import org.apache.camel.spi.Synchronization; 060import org.apache.camel.support.DefaultTimeoutMap; 061import org.apache.camel.support.LoggingExceptionHandler; 062import org.apache.camel.support.ServiceSupport; 063import org.apache.camel.util.AsyncProcessorHelper; 064import org.apache.camel.util.ExchangeHelper; 065import org.apache.camel.util.LRUCacheFactory; 066import org.apache.camel.util.ObjectHelper; 067import org.apache.camel.util.ServiceHelper; 068import org.apache.camel.util.StopWatch; 069import org.apache.camel.util.TimeUtils; 070import org.slf4j.Logger; 071import org.slf4j.LoggerFactory; 072 073/** 074 * An implementation of the <a 075 * href="http://camel.apache.org/aggregator2.html">Aggregator</a> 076 * pattern where a batch of messages are processed (up to a maximum amount or 077 * until some timeout is reached) and messages for the same correlation key are 078 * combined together using some kind of {@link AggregationStrategy} 079 * (by default the latest message is used) to compress many message exchanges 080 * into a smaller number of exchanges. 
081 * <p/> 082 * A good example of this is stock market data; you may be receiving 30,000 083 * messages/second and you may want to throttle it right down so that multiple 084 * messages for the same stock are combined (or just the latest message is used 085 * and older prices are discarded). Another idea is to combine line item messages 086 * together into a single invoice message. 087 */ 088public class AggregateProcessor extends ServiceSupport implements AsyncProcessor, Navigate<Processor>, Traceable, ShutdownPrepared, ShutdownAware, IdAware { 089 090 public static final String AGGREGATE_TIMEOUT_CHECKER = "AggregateTimeoutChecker"; 091 092 private static final Logger LOG = LoggerFactory.getLogger(AggregateProcessor.class); 093 094 private final Lock lock = new ReentrantLock(); 095 private final AtomicBoolean aggregateRepositoryWarned = new AtomicBoolean(); 096 private final CamelContext camelContext; 097 private final Processor processor; 098 private String id; 099 private AggregationStrategy aggregationStrategy; 100 private boolean preCompletion; 101 private Expression correlationExpression; 102 private AggregateController aggregateController; 103 private final ExecutorService executorService; 104 private final boolean shutdownExecutorService; 105 private OptimisticLockRetryPolicy optimisticLockRetryPolicy = new OptimisticLockRetryPolicy(); 106 private ScheduledExecutorService timeoutCheckerExecutorService; 107 private boolean shutdownTimeoutCheckerExecutorService; 108 private ScheduledExecutorService recoverService; 109 // store correlation key -> exchange id in timeout map 110 private TimeoutMap<String, String> timeoutMap; 111 private ExceptionHandler exceptionHandler; 112 private AggregationRepository aggregationRepository; 113 private Map<String, String> closedCorrelationKeys; 114 private final Set<String> batchConsumerCorrelationKeys = new ConcurrentSkipListSet<>(); 115 private final Set<String> inProgressCompleteExchanges = Collections.newSetFromMap(new 
ConcurrentHashMap<String, Boolean>()); 116 private final Map<String, RedeliveryData> redeliveryState = new ConcurrentHashMap<>(); 117 118 private final AggregateProcessorStatistics statistics = new Statistics(); 119 private final AtomicLong totalIn = new AtomicLong(); 120 private final AtomicLong totalCompleted = new AtomicLong(); 121 private final AtomicLong completedBySize = new AtomicLong(); 122 private final AtomicLong completedByStrategy = new AtomicLong(); 123 private final AtomicLong completedByInterval = new AtomicLong(); 124 private final AtomicLong completedByTimeout = new AtomicLong(); 125 private final AtomicLong completedByPredicate = new AtomicLong(); 126 private final AtomicLong completedByBatchConsumer = new AtomicLong(); 127 private final AtomicLong completedByForce = new AtomicLong(); 128 129 // keep booking about redelivery 130 private class RedeliveryData { 131 int redeliveryCounter; 132 } 133 134 private class Statistics implements AggregateProcessorStatistics { 135 136 private boolean statisticsEnabled = true; 137 138 public long getTotalIn() { 139 return totalIn.get(); 140 } 141 142 public long getTotalCompleted() { 143 return totalCompleted.get(); 144 } 145 146 public long getCompletedBySize() { 147 return completedBySize.get(); 148 } 149 150 public long getCompletedByStrategy() { 151 return completedByStrategy.get(); 152 } 153 154 public long getCompletedByInterval() { 155 return completedByInterval.get(); 156 } 157 158 public long getCompletedByTimeout() { 159 return completedByTimeout.get(); 160 } 161 162 public long getCompletedByPredicate() { 163 return completedByPredicate.get(); 164 } 165 166 public long getCompletedByBatchConsumer() { 167 return completedByBatchConsumer.get(); 168 } 169 170 public long getCompletedByForce() { 171 return completedByForce.get(); 172 } 173 174 public void reset() { 175 totalIn.set(0); 176 totalCompleted.set(0); 177 completedBySize.set(0); 178 completedByStrategy.set(0); 179 completedByTimeout.set(0); 
180 completedByPredicate.set(0); 181 completedByBatchConsumer.set(0); 182 completedByForce.set(0); 183 } 184 185 public boolean isStatisticsEnabled() { 186 return statisticsEnabled; 187 } 188 189 public void setStatisticsEnabled(boolean statisticsEnabled) { 190 this.statisticsEnabled = statisticsEnabled; 191 } 192 } 193 194 // options 195 private boolean ignoreInvalidCorrelationKeys; 196 private Integer closeCorrelationKeyOnCompletion; 197 private boolean parallelProcessing; 198 private boolean optimisticLocking; 199 200 // different ways to have completion triggered 201 private boolean eagerCheckCompletion; 202 private Predicate completionPredicate; 203 private long completionTimeout; 204 private Expression completionTimeoutExpression; 205 private long completionInterval; 206 private int completionSize; 207 private Expression completionSizeExpression; 208 private boolean completionFromBatchConsumer; 209 private boolean completionOnNewCorrelationGroup; 210 private AtomicInteger batchConsumerCounter = new AtomicInteger(); 211 private boolean discardOnCompletionTimeout; 212 private boolean forceCompletionOnStop; 213 private boolean completeAllOnStop; 214 private long completionTimeoutCheckerInterval = 1000; 215 216 private ProducerTemplate deadLetterProducerTemplate; 217 218 public AggregateProcessor(CamelContext camelContext, Processor processor, 219 Expression correlationExpression, AggregationStrategy aggregationStrategy, 220 ExecutorService executorService, boolean shutdownExecutorService) { 221 ObjectHelper.notNull(camelContext, "camelContext"); 222 ObjectHelper.notNull(processor, "processor"); 223 ObjectHelper.notNull(correlationExpression, "correlationExpression"); 224 ObjectHelper.notNull(aggregationStrategy, "aggregationStrategy"); 225 ObjectHelper.notNull(executorService, "executorService"); 226 this.camelContext = camelContext; 227 this.processor = processor; 228 this.correlationExpression = correlationExpression; 229 this.aggregationStrategy = 
aggregationStrategy;
        this.executorService = executorService;
        this.shutdownExecutorService = shutdownExecutorService;
        this.exceptionHandler = new LoggingExceptionHandler(camelContext, getClass());
    }

    @Override
    public String toString() {
        return "AggregateProcessor[to: " + processor + "]";
    }

    public String getTraceLabel() {
        return "aggregate[" + correlationExpression + "]";
    }

    public List<Processor> next() {
        if (!hasNext()) {
            return null;
        }
        List<Processor> answer = new ArrayList<>(1);
        answer.add(processor);
        return answer;
    }

    public boolean hasNext() {
        return processor != null;
    }

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public void process(Exchange exchange) throws Exception {
        AsyncProcessorHelper.process(this, exchange);
    }

    // always completes synchronously: aggregation itself happens inline, only the
    // completed (aggregated) exchange is sent downstream asynchronously
    public boolean process(Exchange exchange, AsyncCallback callback) {
        try {
            doProcess(exchange);
        } catch (Throwable e) {
            exchange.setException(e);
        }
        callback.done(true);
        return true;
    }

    /**
     * Aggregates the incoming exchange: computes the correlation key, merges the
     * exchange into the current group via {@link #doAggregation(String, Exchange)},
     * and submits any completed group(s) downstream.
     *
     * @param exchange the incoming exchange
     * @throws Exception if the correlation key is invalid/closed or aggregation fails
     */
    protected void doProcess(Exchange exchange) throws Exception {

        if (getStatistics().isStatisticsEnabled()) {
            totalIn.incrementAndGet();
        }

        //check for the special header to force completion of all groups (and ignore the exchange otherwise)
        boolean completeAllGroups = isCompleteAllGroups(exchange);
        if (completeAllGroups) {
            // remove the header so we do not complete again
            removeFlagCompleteAllGroups(exchange);
            forceCompletionOfAllGroups();
            return;
        }

        // compute correlation expression
        String key = correlationExpression.evaluate(exchange, String.class);
        if (ObjectHelper.isEmpty(key)) {
            // we have a bad correlation key
            if (isIgnoreInvalidCorrelationKeys()) {
                LOG.debug("Invalid correlation key. This Exchange will be ignored: {}", exchange);
                return;
            } else {
                throw new CamelExchangeException("Invalid correlation key", exchange);
            }
        }

        // is the correlation key closed?
        if (closedCorrelationKeys != null && closedCorrelationKeys.containsKey(key)) {
            throw new ClosedCorrelationKeyException(key, exchange);
        }

        // when optimist locking is enabled we keep trying until we succeed
        if (optimisticLocking) {
            List<Exchange> aggregated = null;
            boolean exhaustedRetries = true;
            int attempt = 0;
            do {
                attempt++;
                // copy exchange, and do not share the unit of work
                // the aggregated output runs in another unit of work
                Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);

                // remove the complete all groups headers as it should not be on the copy
                removeFlagCompleteCurrentGroup(copy);
                removeFlagCompleteAllGroups(copy);
                removeFlagCompleteAllGroupsInclusive(copy);

                try {
                    aggregated = doAggregation(key, copy);
                    exhaustedRetries = false;
                    break;
                } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
                    LOG.trace("On attempt {} OptimisticLockingAggregationRepository: {} threw OptimisticLockingException while trying to add() key: {} and exchange: {}",
                            new Object[]{attempt, aggregationRepository, key, copy, e});
                    // back off per the retry policy before trying again
                    optimisticLockRetryPolicy.doDelay(attempt);
                }
            } while (optimisticLockRetryPolicy.shouldRetry(attempt));

            if (exhaustedRetries) {
                throw new CamelExchangeException("Exhausted optimistic locking retry attempts, tried " + attempt + " times", exchange,
                        new OptimisticLockingAggregationRepository.OptimisticLockingException());
            } else if (aggregated != null) {
                // we are completed so submit to completion
                for (Exchange agg : aggregated) {
                    onSubmitCompletion(key, agg);
                }
            }
        } else {
            // copy exchange, and do not share the unit of work
            // the aggregated output runs in another unit of work
            Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);

            // remove the complete all groups headers as it should not be on the copy
            removeFlagCompleteCurrentGroup(copy);
            removeFlagCompleteAllGroups(copy);
            removeFlagCompleteAllGroupsInclusive(copy);

            // when memory based then its fast using synchronized, but if the aggregation repository is IO
            // bound such as JPA etc then concurrent aggregation per correlation key could
            // improve performance as we can run aggregation repository get/add in parallel
            List<Exchange> aggregated;
            lock.lock();
            try {
                aggregated = doAggregation(key, copy);
            } finally {
                lock.unlock();
            }
            // we are completed so do that work outside the lock
            if (aggregated != null) {
                for (Exchange agg : aggregated) {
                    onSubmitCompletion(key, agg);
                }
            }
        }

        // check for the special flag to force completion of all groups (inclusive of the message)
        boolean completeAllGroupsInclusive = isCompleteAllGroupsInclusive(exchange);
        if (completeAllGroupsInclusive) {
            // remove the flag so we do not complete again
            removeFlagCompleteAllGroupsInclusive(exchange);
            forceCompletionOfAllGroups();
        }
    }

    // clears the "complete current group" exchange property; returns the removed value (if any)
    private Object removeFlagCompleteCurrentGroup(Exchange exchange) {
        //before everywhere : return exchange.getIn().removeHeader(Exchange.AGGREGATION_COMPLETE_CURRENT_GROUP);
        return exchange.removeProperty(Exchange.AGGREGATION_COMPLETE_CURRENT_GROUP);
    }

    private Boolean isCompleteCurrentGroup(Exchange exchange) {
        return exchange.getProperty(Exchange.AGGREGATION_COMPLETE_CURRENT_GROUP, false, boolean.class);
    }

    // clears the "complete all groups" flag from BOTH the message header and the
    // exchange property (it is checked in both places, see isCompleteAllGroups)
    private Object removeFlagCompleteAllGroups(Exchange exchange) {
        Object removedHeader = exchange.getIn().removeHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS);
        Object removedProp = exchange.removeProperty(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS);
        return removedHeader ==
null ? removedProp : removedHeader;
    }

    private Boolean isCompleteAllGroups(Exchange exchange) {
        boolean retVal;
        retVal = exchange.getIn().getHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS, false, boolean.class);
        if (!retVal) {
            // according to doc it is a property but it is sometimes read as header
            // some test don't fail because they use the header expression which contains a fallback to properties
            retVal = exchange.getProperty(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS, false, boolean.class);
        }
        return retVal;
    }

    private Object removeFlagCompleteAllGroupsInclusive(Exchange exchange) {
        return exchange.getIn().removeHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS_INCLUSIVE);
    }

    private Boolean isCompleteAllGroupsInclusive(Exchange exchange) {
        return exchange.getIn().getHeader(Exchange.AGGREGATION_COMPLETE_ALL_GROUPS_INCLUSIVE, false, boolean.class);
    }

    /**
     * Aggregates the exchange with the given correlation key
     * <p/>
     * This method <b>must</b> be run synchronized as we cannot aggregate the same correlation key
     * in parallel.
     * <p/>
     * The returned {@link Exchange} should be send downstream using the {@link #onSubmitCompletion(String, org.apache.camel.Exchange)}
     * method which sends out the aggregated and completed {@link Exchange}.
     *
     * @param key the correlation key
     * @param newExchange the exchange
     * @return the aggregated exchange(s) which is complete, or <tt>null</tt> if not yet complete
     * @throws org.apache.camel.CamelExchangeException is thrown if error aggregating
     */
    private List<Exchange> doAggregation(String key, Exchange newExchange) throws CamelExchangeException {
        LOG.trace("onAggregation +++ start +++ with correlation key: {}", key);

        List<Exchange> list = new ArrayList<>();
        String complete = null;

        Exchange answer;
        Exchange originalExchange = aggregationRepository.get(newExchange.getContext(), key);
        Exchange oldExchange = originalExchange;

        Integer size = 1;
        if (oldExchange != null) {
            // hack to support legacy AggregationStrategy's that modify and return the oldExchange, these will not
            // working when using an identify based approach for optimistic locking like the MemoryAggregationRepository.
            if (optimisticLocking && aggregationRepository instanceof MemoryAggregationRepository) {
                oldExchange = originalExchange.copy();
            }
            size = oldExchange.getProperty(Exchange.AGGREGATED_SIZE, 0, Integer.class);
            size++;
        }

        // prepare the exchanges for aggregation
        ExchangeHelper.prepareAggregation(oldExchange, newExchange);

        // check if we are pre complete
        if (preCompletion) {
            try {
                // put the current aggregated size on the exchange so its avail during completion check
                newExchange.setProperty(Exchange.AGGREGATED_SIZE, size);
                complete = isPreCompleted(key, oldExchange, newExchange);
                // make sure to track timeouts if not complete
                if (complete == null) {
                    trackTimeout(key, newExchange);
                }
                // remove it afterwards
                newExchange.removeProperty(Exchange.AGGREGATED_SIZE);
            } catch (Throwable e) {
                // must catch any exception from aggregation
                throw new CamelExchangeException("Error occurred during preComplete", newExchange, e);
            }
        } else if (isEagerCheckCompletion()) {
            // put the current aggregated size on the exchange so its avail during completion check
            newExchange.setProperty(Exchange.AGGREGATED_SIZE, size);
            complete = isCompleted(key, newExchange);
            // make sure to track timeouts if not complete
            if (complete == null) {
                trackTimeout(key, newExchange);
            }
            // remove it afterwards
            newExchange.removeProperty(Exchange.AGGREGATED_SIZE);
        }

        if (preCompletion && complete != null) {
            // need to pre complete the current group before we aggregate
            doAggregationComplete(complete, list, key, originalExchange, oldExchange);
            // as we complete the current group eager, we should indicate the new group is not complete
            complete = null;
            // and clear old/original exchange as we start on a new group
            oldExchange = null;
            originalExchange = null;
            // and reset the size to 1
            size = 1;
            // make sure to track timeout as we just restart the correlation group when we are in pre completion mode
            trackTimeout(key, newExchange);
        }

        // aggregate the exchanges
        try {
            answer = onAggregation(oldExchange, newExchange);
        } catch (Throwable e) {
            // must catch any exception from aggregation
            throw new CamelExchangeException("Error occurred during aggregation", newExchange, e);
        }
        if (answer == null) {
            throw new CamelExchangeException("AggregationStrategy " + aggregationStrategy + " returned null which is not allowed", newExchange);
        }

        // check for the special exchange property to force completion of all groups
        boolean completeAllGroups = isCompleteAllGroups(answer);
        if (completeAllGroups) {
            // remove the exchange property so we do not complete again
            removeFlagCompleteAllGroups(answer);
            forceCompletionOfAllGroups();
        } else if (isCompletionOnNewCorrelationGroup() && originalExchange == null) {
            // its a new group so force complete of all existing groups
            forceCompletionOfAllGroups();
        }

        // special for some repository implementations
        if (aggregationRepository instanceof RecoverableAggregationRepository) {
            boolean valid = oldExchange == null || answer.getExchangeId().equals(oldExchange.getExchangeId());
            if (!valid && aggregateRepositoryWarned.compareAndSet(false, true)) {
                LOG.warn("AggregationStrategy should return the oldExchange instance instead of the newExchange whenever possible"
                    + " as otherwise this can lead to unexpected behavior with some RecoverableAggregationRepository implementations");
            }
        }

        // update the aggregated size
        answer.setProperty(Exchange.AGGREGATED_SIZE, size);

        // maybe we should check completion after the aggregation
        if (!preCompletion && !isEagerCheckCompletion()) {
            complete = isCompleted(key, answer);
            // make sure to track timeouts if not complete
            if (complete == null) {
                trackTimeout(key, newExchange);
            }
        }

        if (complete == null) {
            // only need to update aggregation repository if we are not complete
            doAggregationRepositoryAdd(newExchange.getContext(), key, originalExchange, answer);
        } else {
            // if we are complete then add the answer to the list
            doAggregationComplete(complete, list, key, originalExchange, answer);
        }

        LOG.trace("onAggregation +++ end +++ with correlation key: {}", key);
        return list;
    }

    /**
     * Completes the current group (or, for "consumer" completion, every pending
     * batch-consumer group) and collects the completed exchange(s) into the list.
     *
     * @param complete         what triggered the completion (e.g. "size", "consumer")
     * @param list             output list receiving the completed exchange(s)
     * @param key              the correlation key of the group just aggregated
     * @param originalExchange the previously stored exchange, or null if this was a new group
     * @param answer           the aggregated exchange for the current key
     */
    protected void doAggregationComplete(String complete, List<Exchange> list, String key, Exchange originalExchange, Exchange answer) {
        if ("consumer".equals(complete)) {
            for (String batchKey : batchConsumerCorrelationKeys) {
                Exchange batchAnswer;
                if (batchKey.equals(key)) {
                    // skip the current aggregated key as we have already aggregated it and have the answer
                    batchAnswer = answer;
                } else {
                    batchAnswer = aggregationRepository.get(camelContext, batchKey);
                }

                if (batchAnswer != null) {
batchAnswer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, complete);
                    onCompletion(batchKey, originalExchange, batchAnswer, false);
                    list.add(batchAnswer);
                }
            }
            batchConsumerCorrelationKeys.clear();
            // we have already submitted to completion, so answer should be null
            answer = null;
        } else if (answer != null) {
            // we are complete for this exchange
            answer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, complete);
            answer = onCompletion(key, originalExchange, answer, false);
        }

        if (answer != null) {
            list.add(answer);
        }
    }

    /**
     * Stores the partially aggregated exchange in the repository; with optimistic
     * locking a conflicting concurrent update surfaces as OptimisticLockingException.
     */
    protected void doAggregationRepositoryAdd(CamelContext camelContext, String key, Exchange oldExchange, Exchange newExchange) {
        LOG.trace("In progress aggregated oldExchange: {}, newExchange: {} with correlation key: {}", oldExchange, newExchange, key);
        if (optimisticLocking) {
            try {
                ((OptimisticLockingAggregationRepository)aggregationRepository).add(camelContext, key, oldExchange, newExchange);
            } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) {
                // let an aware strategy undo/compensate before the retry loop kicks in
                onOptimisticLockingFailure(oldExchange, newExchange);
                throw e;
            }
        } else {
            aggregationRepository.add(camelContext, key, newExchange);
        }
    }

    // notifies an OptimisticLockingAware strategy that its last aggregate() result was discarded
    protected void onOptimisticLockingFailure(Exchange oldExchange, Exchange newExchange) {
        AggregationStrategy strategy = aggregationStrategy;
        if (strategy instanceof DelegateAggregationStrategy) {
            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
        }
        if (strategy instanceof OptimisticLockingAwareAggregationStrategy) {
            LOG.trace("onOptimisticLockFailure with AggregationStrategy: {}, oldExchange: {}, newExchange: {}",
                    new Object[]{strategy, oldExchange, newExchange});
            ((OptimisticLockingAwareAggregationStrategy)strategy).onOptimisticLockFailure(oldExchange, newExchange);
        }
    }

    /**
     * Tests whether the given exchanges is pre-complete or not
     *
     * @param key the correlation key
     * @param oldExchange the existing exchange
     * @param newExchange the incoming exchange
     * @return <tt>null</tt> if not pre-completed, otherwise a String with the type that triggered the pre-completion
     */
    protected String isPreCompleted(String key, Exchange oldExchange, Exchange newExchange) {
        boolean complete = false;
        AggregationStrategy strategy = aggregationStrategy;
        if (strategy instanceof DelegateAggregationStrategy) {
            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
        }
        if (strategy instanceof PreCompletionAwareAggregationStrategy) {
            complete = ((PreCompletionAwareAggregationStrategy) strategy).preComplete(oldExchange, newExchange);
        }
        return complete ? "strategy" : null;
    }

    /**
     * Tests whether the given exchange is complete or not
     *
     * @param key the correlation key
     * @param exchange the incoming exchange
     * @return <tt>null</tt> if not completed, otherwise a String with the type that triggered the completion
     */
    protected String isCompleted(String key, Exchange exchange) {
        // batch consumer completion must always run first
        if (isCompletionFromBatchConsumer()) {
            batchConsumerCorrelationKeys.add(key);
            batchConsumerCounter.incrementAndGet();
            int size = exchange.getProperty(Exchange.BATCH_SIZE, 0, Integer.class);
            if (size > 0 && batchConsumerCounter.intValue() >= size) {
                // batch consumer is complete then reset the counter
                batchConsumerCounter.set(0);
                return "consumer";
            }
        }

        if (isCompleteCurrentGroup(exchange)) {
            removeFlagCompleteCurrentGroup(exchange);
            return "strategy";
        }

        if (getCompletionPredicate() != null) {
            boolean answer = getCompletionPredicate().matches(exchange);
            if (answer) {
                return "predicate";
            }
        }

        boolean sizeChecked = false;
        if (getCompletionSizeExpression() != null) {
            Integer value = getCompletionSizeExpression().evaluate(exchange, Integer.class);
            if (value != null && value > 0) {
                // mark as already checked size as expression takes precedence over static configured
                sizeChecked = true;
                int size = exchange.getProperty(Exchange.AGGREGATED_SIZE, 1, Integer.class);
                if (size >= value) {
                    return "size";
                }
            }
        }
        if (!sizeChecked && getCompletionSize() > 0) {
            int size = exchange.getProperty(Exchange.AGGREGATED_SIZE, 1, Integer.class);
            if (size >= getCompletionSize()) {
                return "size";
            }
        }

        // not complete
        return null;
    }

    /**
     * Registers (or refreshes) the timeout for the given correlation key so the
     * timeout checker task can complete the group if no new exchange arrives in time.
     */
    protected void trackTimeout(String key, Exchange exchange) {
        // timeout can be either evaluated based on an expression or from a fixed value
        // expression takes precedence
        boolean timeoutSet = false;
        if (getCompletionTimeoutExpression() != null) {
            Long value = getCompletionTimeoutExpression().evaluate(exchange, Long.class);
            if (value != null && value > 0) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Updating correlation key {} to timeout after {} ms. as exchange received: {}",
                            new Object[]{key, value, exchange});
                }
                addExchangeToTimeoutMap(key, exchange, value);
                timeoutSet = true;
            }
        }
        if (!timeoutSet && getCompletionTimeout() > 0) {
            // timeout is used so use the timeout map to keep an eye on this
            if (LOG.isTraceEnabled()) {
                LOG.trace("Updating correlation key {} to timeout after {} ms. 
as exchange received: {}",
                        new Object[]{key, getCompletionTimeout(), exchange});
            }
            addExchangeToTimeoutMap(key, exchange, getCompletionTimeout());
        }
    }

    // delegates the actual merge of old and new exchange to the configured strategy
    protected Exchange onAggregation(Exchange oldExchange, Exchange newExchange) {
        return aggregationStrategy.aggregate(oldExchange, newExchange);
    }

    // asks a PreCompletionAware strategy whether the new exchange should close the current group
    protected boolean onPreCompletionAggregation(Exchange oldExchange, Exchange newExchange) {
        AggregationStrategy strategy = aggregationStrategy;
        if (strategy instanceof DelegateAggregationStrategy) {
            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
        }
        if (strategy instanceof PreCompletionAwareAggregationStrategy) {
            return ((PreCompletionAwareAggregationStrategy) strategy).preComplete(oldExchange, newExchange);
        }
        return false;
    }

    /**
     * Performs the bookkeeping when a group completes: removes it from the repository
     * and timeout map, closes the correlation key if configured, and decides whether
     * the aggregated exchange should be published or discarded.
     *
     * @param key         the correlation key
     * @param original    the previously stored exchange, or null if the group had only one exchange
     * @param aggregated  the aggregated exchange to (potentially) send downstream
     * @param fromTimeout whether the completion was triggered by the timeout checker
     * @return the exchange to publish, or <tt>null</tt> if it was discarded (timeout + discard enabled)
     */
    protected Exchange onCompletion(final String key, final Exchange original, final Exchange aggregated, boolean fromTimeout) {
        // store the correlation key as property before we remove so the repository has that information
        if (original != null) {
            original.setProperty(Exchange.AGGREGATED_CORRELATION_KEY, key);
        }
        aggregated.setProperty(Exchange.AGGREGATED_CORRELATION_KEY, key);

        // only remove if we have previous added (as we could potentially complete with only 1 exchange)
        // (if we have previous added then we have that as the original exchange)
        if (original != null) {
            // remove from repository as its completed, we do this first as to trigger any OptimisticLockingException's
            aggregationRepository.remove(aggregated.getContext(), key, original);
        }

        if (!fromTimeout && timeoutMap != null) {
            // cleanup timeout map if it was a incoming exchange which triggered the timeout (and not the timeout checker)
            LOG.trace("Removing correlation key {} from timeout", key);
            timeoutMap.remove(key);
        }

        // this key has been closed so add it to the closed map
        if (closedCorrelationKeys != null) {
            closedCorrelationKeys.put(key, key);
        }

        if (fromTimeout) {
            // invoke timeout if its timeout aware aggregation strategy,
            // to allow any custom processing before discarding the exchange
            AggregationStrategy strategy = aggregationStrategy;
            if (strategy instanceof DelegateAggregationStrategy) {
                strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
            }
            if (strategy instanceof TimeoutAwareAggregationStrategy) {
                long timeout = getCompletionTimeout() > 0 ? getCompletionTimeout() : -1;
                ((TimeoutAwareAggregationStrategy) strategy).timeout(aggregated, -1, -1, timeout);
            }
        }

        Exchange answer;
        if (fromTimeout && isDiscardOnCompletionTimeout()) {
            // discard due timeout
            LOG.debug("Aggregation for correlation key {} discarding aggregated exchange: {}", key, aggregated);
            // must confirm the discarded exchange
            aggregationRepository.confirm(aggregated.getContext(), aggregated.getExchangeId());
            // and remove redelivery state as well
            redeliveryState.remove(aggregated.getExchangeId());
            // the completion was from timeout and we should just discard it
            answer = null;
        } else {
            // the aggregated exchange should be published (sent out)
            answer = aggregated;
        }

        return answer;
    }

    /**
     * Sends a completed aggregated exchange downstream on the executor service,
     * updating completion statistics and tracking it as in-progress until done.
     */
    private void onSubmitCompletion(final String key, final Exchange exchange) {
        LOG.debug("Aggregation complete for correlation key {} sending aggregated exchange: {}", key, exchange);

        // add this as in progress before we submit the task
        inProgressCompleteExchanges.add(exchange.getExchangeId());

        // invoke the on completion callback
        AggregationStrategy target = aggregationStrategy;
        if (target instanceof DelegateAggregationStrategy) {
            target = ((DelegateAggregationStrategy) target).getDelegate();
        }
        if (target instanceof CompletionAwareAggregationStrategy) {
            ((CompletionAwareAggregationStrategy) target).onCompletion(exchange);
        }

        if (getStatistics().isStatisticsEnabled()) {
            totalCompleted.incrementAndGet();

            String completedBy = exchange.getProperty(Exchange.AGGREGATED_COMPLETED_BY, String.class);
            if ("interval".equals(completedBy)) {
                completedByInterval.incrementAndGet();
            } else if ("timeout".equals(completedBy)) {
                completedByTimeout.incrementAndGet();
            } else if ("force".equals(completedBy)) {
                completedByForce.incrementAndGet();
            } else if ("consumer".equals(completedBy)) {
                completedByBatchConsumer.incrementAndGet();
            } else if ("predicate".equals(completedBy)) {
                completedByPredicate.incrementAndGet();
            } else if ("size".equals(completedBy)) {
                completedBySize.incrementAndGet();
            } else if ("strategy".equals(completedBy)) {
                completedByStrategy.incrementAndGet();
            }
        }

        // send this exchange
        executorService.submit(new Runnable() {
            public void run() {
                LOG.debug("Processing aggregated exchange: {}", exchange);

                // add on completion task so we remember to update the inProgressCompleteExchanges
                exchange.addOnCompletion(new AggregateOnCompletion(exchange.getExchangeId()));

                try {
                    processor.process(exchange);
                } catch (Throwable e) {
                    exchange.setException(e);
                }

                // log exception if there was a problem
                if (exchange.getException() != null) {
                    // if there was an exception then let the exception handler handle it
                    getExceptionHandler().handleException("Error processing aggregated exchange", exchange, exchange.getException());
                } else {
                    LOG.trace("Processing aggregated exchange: {} complete.", exchange);
                }
            }
        });
    }

    /**
     * Restores the timeout map with timeout values from the aggregation repository.
     * <p/>
     * This is needed in case the aggregator has been stopped and started again (for example a server restart).
846 * Then the existing exchanges from the {@link AggregationRepository} must have their timeout conditions restored. 847 */ 848 protected void restoreTimeoutMapFromAggregationRepository() throws Exception { 849 // grab the timeout value for each partly aggregated exchange 850 Set<String> keys = aggregationRepository.getKeys(); 851 if (keys == null || keys.isEmpty()) { 852 return; 853 } 854 855 StopWatch watch = new StopWatch(); 856 LOG.trace("Starting restoring CompletionTimeout for {} existing exchanges from the aggregation repository...", keys.size()); 857 858 for (String key : keys) { 859 Exchange exchange = aggregationRepository.get(camelContext, key); 860 // grab the timeout value 861 long timeout = exchange.hasProperties() ? exchange.getProperty(Exchange.AGGREGATED_TIMEOUT, 0, long.class) : 0; 862 if (timeout > 0) { 863 LOG.trace("Restoring CompletionTimeout for exchangeId: {} with timeout: {} millis.", exchange.getExchangeId(), timeout); 864 addExchangeToTimeoutMap(key, exchange, timeout); 865 } 866 } 867 868 // log duration of this task so end user can see how long it takes to pre-check this upon starting 869 LOG.info("Restored {} CompletionTimeout conditions in the AggregationTimeoutChecker in {}", 870 timeoutMap.size(), TimeUtils.printDuration(watch.taken())); 871 } 872 873 /** 874 * Adds the given exchange to the timeout map, which is used by the timeout checker task to trigger timeouts. 
 *
     * @param key the correlation key
     * @param exchange the exchange
     * @param timeout the timeout value in millis
     */
    private void addExchangeToTimeoutMap(String key, Exchange exchange, long timeout) {
        // store the timeout value on the exchange as well, in case we need it later
        exchange.setProperty(Exchange.AGGREGATED_TIMEOUT, timeout);
        timeoutMap.put(key, exchange.getExchangeId(), timeout);
    }

    /**
     * Current number of closed correlation keys in the memory cache
     */
    public int getClosedCorrelationKeysCacheSize() {
        if (closedCorrelationKeys != null) {
            return closedCorrelationKeys.size();
        } else {
            return 0;
        }
    }

    /**
     * Clear all the closed correlation keys stored in the cache
     */
    public void clearClosedCorrelationKeysCache() {
        if (closedCorrelationKeys != null) {
            closedCorrelationKeys.clear();
        }
    }

    /**
     * Gets the statistics (counters of how many groups completed and by which condition).
     */
    public AggregateProcessorStatistics getStatistics() {
        return statistics;
    }

    /**
     * Number of completed exchanges currently submitted for processing but not yet finished.
     */
    public int getInProgressCompleteExchanges() {
        return inProgressCompleteExchanges.size();
    }

    // -----------------------------------------------------------------------
    // Configuration getters and setters
    // -----------------------------------------------------------------------

    public Predicate getCompletionPredicate() {
        return completionPredicate;
    }

    public void setCompletionPredicate(Predicate completionPredicate) {
        this.completionPredicate = completionPredicate;
    }

    public boolean isEagerCheckCompletion() {
        return eagerCheckCompletion;
    }

    public void setEagerCheckCompletion(boolean eagerCheckCompletion) {
        this.eagerCheckCompletion = eagerCheckCompletion;
    }

    public long getCompletionTimeout() {
        return completionTimeout;
    }

    public void setCompletionTimeout(long completionTimeout) {
        this.completionTimeout = completionTimeout;
    }

    public Expression getCompletionTimeoutExpression() {
        return completionTimeoutExpression;
    }

    public void setCompletionTimeoutExpression(Expression completionTimeoutExpression) {
        this.completionTimeoutExpression = completionTimeoutExpression;
    }

    public long getCompletionInterval() {
        return completionInterval;
    }

    public void setCompletionInterval(long completionInterval) {
        this.completionInterval = completionInterval;
    }

    public int getCompletionSize() {
        return completionSize;
    }

    public void setCompletionSize(int completionSize) {
        this.completionSize = completionSize;
    }

    public Expression getCompletionSizeExpression() {
        return completionSizeExpression;
    }

    public void setCompletionSizeExpression(Expression completionSizeExpression) {
        this.completionSizeExpression = completionSizeExpression;
    }

    public boolean isIgnoreInvalidCorrelationKeys() {
        return ignoreInvalidCorrelationKeys;
    }

    public void setIgnoreInvalidCorrelationKeys(boolean ignoreInvalidCorrelationKeys) {
        this.ignoreInvalidCorrelationKeys = ignoreInvalidCorrelationKeys;
    }

    public Integer getCloseCorrelationKeyOnCompletion() {
        return closeCorrelationKeyOnCompletion;
    }

    public void setCloseCorrelationKeyOnCompletion(Integer closeCorrelationKeyOnCompletion) {
        this.closeCorrelationKeyOnCompletion = closeCorrelationKeyOnCompletion;
    }

    public boolean isCompletionFromBatchConsumer() {
        return completionFromBatchConsumer;
    }

    public void setCompletionFromBatchConsumer(boolean completionFromBatchConsumer) {
        this.completionFromBatchConsumer = completionFromBatchConsumer;
    }

    public boolean isCompletionOnNewCorrelationGroup() {
        return completionOnNewCorrelationGroup;
    }

    public void setCompletionOnNewCorrelationGroup(boolean completionOnNewCorrelationGroup) {
        this.completionOnNewCorrelationGroup = completionOnNewCorrelationGroup;
    }

    public boolean isCompleteAllOnStop() {
        return completeAllOnStop;
    }

    public long getCompletionTimeoutCheckerInterval() {
        return completionTimeoutCheckerInterval;
    }

    public void setCompletionTimeoutCheckerInterval(long completionTimeoutCheckerInterval) {
        this.completionTimeoutCheckerInterval = completionTimeoutCheckerInterval;
    }

    public ExceptionHandler getExceptionHandler() {
        return exceptionHandler;
    }

    public void setExceptionHandler(ExceptionHandler exceptionHandler) {
        this.exceptionHandler = exceptionHandler;
    }

    public boolean isParallelProcessing() {
        return parallelProcessing;
    }

    public void setParallelProcessing(boolean parallelProcessing) {
        this.parallelProcessing = parallelProcessing;
    }

    public boolean isOptimisticLocking() {
        return optimisticLocking;
    }

    public void setOptimisticLocking(boolean optimisticLocking) {
        this.optimisticLocking = optimisticLocking;
    }

    public AggregationRepository getAggregationRepository() {
        return aggregationRepository;
    }

    public void setAggregationRepository(AggregationRepository aggregationRepository) {
        this.aggregationRepository = aggregationRepository;
    }

    public boolean isDiscardOnCompletionTimeout() {
        return discardOnCompletionTimeout;
    }

    public void setDiscardOnCompletionTimeout(boolean discardOnCompletionTimeout) {
        this.discardOnCompletionTimeout = discardOnCompletionTimeout;
    }

    public void setForceCompletionOnStop(boolean forceCompletionOnStop) {
        this.forceCompletionOnStop = forceCompletionOnStop;
    }

    public void setCompleteAllOnStop(boolean completeAllOnStop) {
        this.completeAllOnStop = completeAllOnStop;
    }

    public void setTimeoutCheckerExecutorService(ScheduledExecutorService timeoutCheckerExecutorService) {
        this.timeoutCheckerExecutorService = timeoutCheckerExecutorService;
    }

    public ScheduledExecutorService getTimeoutCheckerExecutorService() {
        return timeoutCheckerExecutorService;
    }

    public boolean isShutdownTimeoutCheckerExecutorService() {
        return shutdownTimeoutCheckerExecutorService;
    }

    public void setShutdownTimeoutCheckerExecutorService(boolean shutdownTimeoutCheckerExecutorService) {
        this.shutdownTimeoutCheckerExecutorService = shutdownTimeoutCheckerExecutorService;
    }

    public void setOptimisticLockRetryPolicy(OptimisticLockRetryPolicy optimisticLockRetryPolicy) {
        this.optimisticLockRetryPolicy = optimisticLockRetryPolicy;
    }

    public OptimisticLockRetryPolicy getOptimisticLockRetryPolicy() {
        return optimisticLockRetryPolicy;
    }

    public AggregationStrategy getAggregationStrategy() {
        return aggregationStrategy;
    }

    public void setAggregationStrategy(AggregationStrategy aggregationStrategy) {
        this.aggregationStrategy = aggregationStrategy;
    }

    public Expression getCorrelationExpression() {
        return correlationExpression;
    }

    public void setCorrelationExpression(Expression correlationExpression) {
        this.correlationExpression = correlationExpression;
    }

    public AggregateController getAggregateController() {
        return aggregateController;
    }

    public void setAggregateController(AggregateController aggregateController) {
        this.aggregateController = aggregateController;
    }

    /**
     * On completion task which keeps the booking of the in progress up to date
     */
    private final class AggregateOnCompletion implements Synchronization {
        // id of the aggregated exchange being tracked (captured at submit time)
        private final String exchangeId;

        private AggregateOnCompletion(String exchangeId) {
            // must use the original exchange id as it could potentially change if send over SEDA etc.
            this.exchangeId = exchangeId;
        }

        public void onFailure(Exchange exchange) {
            LOG.trace("Aggregated exchange onFailure: {}", exchange);

            // must remember to remove in progress when we failed
            inProgressCompleteExchanges.remove(exchangeId);
            // do not remove redelivery state as we need it when we redeliver again later
        }

        public void onComplete(Exchange exchange) {
            LOG.trace("Aggregated exchange onComplete: {}", exchange);

            // only confirm if we processed without a problem
            try {
                aggregationRepository.confirm(exchange.getContext(), exchangeId);
                // and remove redelivery state as well
                redeliveryState.remove(exchangeId);
            } finally {
                // must remember to remove in progress when we are complete
                inProgressCompleteExchanges.remove(exchangeId);
            }
        }

        @Override
        public String toString() {
            return "AggregateOnCompletion";
        }
    }

    /**
     * Background task that looks for aggregated exchanges which is triggered by completion timeouts.
1151 */ 1152 private final class AggregationTimeoutMap extends DefaultTimeoutMap<String, String> { 1153 1154 private AggregationTimeoutMap(ScheduledExecutorService executor, long requestMapPollTimeMillis) { 1155 // do NOT use locking on the timeout map as this aggregator has its own shared lock we will use instead 1156 super(executor, requestMapPollTimeMillis, optimisticLocking); 1157 } 1158 1159 @Override 1160 public void purge() { 1161 // must acquire the shared aggregation lock to be able to purge 1162 if (!optimisticLocking) { 1163 lock.lock(); 1164 } 1165 try { 1166 super.purge(); 1167 } finally { 1168 if (!optimisticLocking) { 1169 lock.unlock(); 1170 } 1171 } 1172 } 1173 1174 @Override 1175 public boolean onEviction(String key, String exchangeId) { 1176 log.debug("Completion timeout triggered for correlation key: {}", key); 1177 1178 boolean inProgress = inProgressCompleteExchanges.contains(exchangeId); 1179 if (inProgress) { 1180 LOG.trace("Aggregated exchange with id: {} is already in progress.", exchangeId); 1181 return true; 1182 } 1183 1184 // get the aggregated exchange 1185 boolean evictionStolen = false; 1186 Exchange answer = aggregationRepository.get(camelContext, key); 1187 if (answer == null) { 1188 evictionStolen = true; 1189 } else { 1190 // indicate it was completed by timeout 1191 answer.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "timeout"); 1192 try { 1193 answer = onCompletion(key, answer, answer, true); 1194 if (answer != null) { 1195 onSubmitCompletion(key, answer); 1196 } 1197 } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) { 1198 evictionStolen = true; 1199 } 1200 } 1201 1202 if (optimisticLocking && evictionStolen) { 1203 LOG.debug("Another Camel instance has already successfully correlated or processed this timeout eviction " 1204 + "for exchange with id: {} and correlation id: {}", exchangeId, key); 1205 } 1206 return true; 1207 } 1208 } 1209 1210 /** 1211 * Background task that triggers completion 
based on interval. 1212 */ 1213 private final class AggregationIntervalTask implements Runnable { 1214 1215 public void run() { 1216 // only run if CamelContext has been fully started 1217 if (!camelContext.getStatus().isStarted()) { 1218 LOG.trace("Completion interval task cannot start due CamelContext({}) has not been started yet", camelContext.getName()); 1219 return; 1220 } 1221 1222 LOG.trace("Starting completion interval task"); 1223 1224 // trigger completion for all in the repository 1225 Set<String> keys = aggregationRepository.getKeys(); 1226 1227 if (keys != null && !keys.isEmpty()) { 1228 // must acquire the shared aggregation lock to be able to trigger interval completion 1229 if (!optimisticLocking) { 1230 lock.lock(); 1231 } 1232 try { 1233 for (String key : keys) { 1234 boolean stolenInterval = false; 1235 Exchange exchange = aggregationRepository.get(camelContext, key); 1236 if (exchange == null) { 1237 stolenInterval = true; 1238 } else { 1239 LOG.trace("Completion interval triggered for correlation key: {}", key); 1240 // indicate it was completed by interval 1241 exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "interval"); 1242 try { 1243 Exchange answer = onCompletion(key, exchange, exchange, false); 1244 if (answer != null) { 1245 onSubmitCompletion(key, answer); 1246 } 1247 } catch (OptimisticLockingAggregationRepository.OptimisticLockingException e) { 1248 stolenInterval = true; 1249 } 1250 } 1251 if (optimisticLocking && stolenInterval) { 1252 LOG.debug("Another Camel instance has already processed this interval aggregation for exchange with correlation id: {}", key); 1253 } 1254 } 1255 } finally { 1256 if (!optimisticLocking) { 1257 lock.unlock(); 1258 } 1259 } 1260 } 1261 1262 LOG.trace("Completion interval task complete"); 1263 } 1264 } 1265 1266 /** 1267 * Background task that looks for aggregated exchanges to recover. 
 */
    private final class RecoverTask implements Runnable {
        // repository that can scan for and recover failed/unconfirmed exchanges
        private final RecoverableAggregationRepository recoverable;

        private RecoverTask(RecoverableAggregationRepository recoverable) {
            this.recoverable = recoverable;
        }

        public void run() {
            // only run if CamelContext has been fully started
            if (!camelContext.getStatus().isStarted()) {
                LOG.trace("Recover check cannot start due CamelContext({}) has not been started yet", camelContext.getName());
                return;
            }

            LOG.trace("Starting recover check");

            // copy the current in progress before doing scan
            final Set<String> copyOfInProgress = new LinkedHashSet<>(inProgressCompleteExchanges);

            Set<String> exchangeIds = recoverable.scan(camelContext);
            for (String exchangeId : exchangeIds) {

                // we may shutdown while doing recovery
                if (!isRunAllowed()) {
                    LOG.info("We are shutting down so stop recovering");
                    return;
                }
                if (!optimisticLocking) {
                    lock.lock();
                }
                try {
                    // consider in progress if it was in progress before we did the scan, or currently after we did the scan
                    // its safer to consider it in progress than risk duplicates due both in progress + recovered
                    boolean inProgress = copyOfInProgress.contains(exchangeId) || inProgressCompleteExchanges.contains(exchangeId);
                    if (inProgress) {
                        LOG.trace("Aggregated exchange with id: {} is already in progress.", exchangeId);
                    } else {
                        LOG.debug("Loading aggregated exchange with id: {} to be recovered.", exchangeId);
                        Exchange exchange = recoverable.recover(camelContext, exchangeId);
                        if (exchange != null) {
                            // get the correlation key
                            String key = exchange.getProperty(Exchange.AGGREGATED_CORRELATION_KEY, String.class);
                            // and mark it as redelivered
                            exchange.getIn().setHeader(Exchange.REDELIVERED, Boolean.TRUE);

                            // get the current redelivery data
                            RedeliveryData data = redeliveryState.get(exchange.getExchangeId());

                            // if we are exhausted, then move to dead letter channel
                            if (data != null && recoverable.getMaximumRedeliveries() > 0 && data.redeliveryCounter >= recoverable.getMaximumRedeliveries()) {
                                LOG.warn("The recovered exchange is exhausted after " + recoverable.getMaximumRedeliveries()
                                        + " attempts, will now be moved to dead letter channel: " + recoverable.getDeadLetterUri());

                                // send to DLC
                                try {
                                    // set redelivery counter
                                    exchange.getIn().setHeader(Exchange.REDELIVERY_COUNTER, data.redeliveryCounter);
                                    exchange.getIn().setHeader(Exchange.REDELIVERY_EXHAUSTED, Boolean.TRUE);
                                    deadLetterProducerTemplate.send(recoverable.getDeadLetterUri(), exchange);
                                } catch (Throwable e) {
                                    exchange.setException(e);
                                }

                                // handle if failed
                                if (exchange.getException() != null) {
                                    getExceptionHandler().handleException("Failed to move recovered Exchange to dead letter channel: " + recoverable.getDeadLetterUri(), exchange.getException());
                                } else {
                                    // it was ok, so confirm after it has been moved to dead letter channel, so we wont recover it again
                                    recoverable.confirm(camelContext, exchangeId);
                                }
                            } else {
                                // update current redelivery state
                                if (data == null) {
                                    // create new data
                                    data = new RedeliveryData();
                                    redeliveryState.put(exchange.getExchangeId(), data);
                                }
                                data.redeliveryCounter++;

                                // set redelivery counter
                                exchange.getIn().setHeader(Exchange.REDELIVERY_COUNTER, data.redeliveryCounter);
                                if (recoverable.getMaximumRedeliveries() > 0) {
                                    exchange.getIn().setHeader(Exchange.REDELIVERY_MAX_COUNTER, recoverable.getMaximumRedeliveries());
                                }

                                LOG.debug("Delivery attempt: {} to recover aggregated exchange with id: {}", data.redeliveryCounter, exchangeId);

                                // not exhaust so resubmit the recovered exchange
                                onSubmitCompletion(key, exchange);
                            }
                        }
                    }
                } finally {
                    if (!optimisticLocking) {
                        lock.unlock();
                    }
                }
            }

            LOG.trace("Recover check complete");
        }
    }

    @Override
    @SuppressWarnings("unchecked")
    protected void doStart() throws Exception {
        // let the strategy know about the CamelContext and detect pre-completion mode
        AggregationStrategy strategy = aggregationStrategy;
        if (strategy instanceof DelegateAggregationStrategy) {
            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
        }
        if (strategy instanceof CamelContextAware) {
            ((CamelContextAware) strategy).setCamelContext(camelContext);
        }
        if (strategy instanceof PreCompletionAwareAggregationStrategy) {
            preCompletion = true;
            LOG.info("PreCompletionAwareAggregationStrategy detected. Aggregator {} is in pre-completion mode.", getId());
        }

        if (!preCompletion) {
            // if not in pre completion mode then check we configured the completion required
            if (getCompletionTimeout() <= 0 && getCompletionInterval() <= 0 && getCompletionSize() <= 0 && getCompletionPredicate() == null
                    && !isCompletionFromBatchConsumer() && getCompletionTimeoutExpression() == null
                    && getCompletionSizeExpression() == null) {
                throw new IllegalStateException("At least one of the completions options"
                        + " [completionTimeout, completionInterval, completionSize, completionPredicate, completionFromBatchConsumer] must be set");
            }
        }

        if (getCloseCorrelationKeyOnCompletion() != null) {
            if (getCloseCorrelationKeyOnCompletion() > 0) {
                LOG.info("Using ClosedCorrelationKeys with a LRUCache with a capacity of {}", getCloseCorrelationKeyOnCompletion());
                closedCorrelationKeys = LRUCacheFactory.newLRUCache(getCloseCorrelationKeyOnCompletion());
            } else {
                LOG.info("Using ClosedCorrelationKeys with unbounded capacity");
                closedCorrelationKeys = new ConcurrentHashMap<>();
            }
        }

        if (aggregationRepository == null) {
            aggregationRepository = new MemoryAggregationRepository(optimisticLocking);
            LOG.info("Defaulting to MemoryAggregationRepository");
        }

        if (optimisticLocking) {
            if (!(aggregationRepository instanceof OptimisticLockingAggregationRepository)) {
                throw new IllegalArgumentException("Optimistic locking cannot be enabled without using an AggregationRepository that implements OptimisticLockingAggregationRepository");
            }
            LOG.info("Optimistic locking is enabled");
        }

        ServiceHelper.startServices(aggregationStrategy, processor, aggregationRepository);

        // should we use recover checker
        if (aggregationRepository instanceof RecoverableAggregationRepository) {
            RecoverableAggregationRepository recoverable = (RecoverableAggregationRepository) aggregationRepository;
            if (recoverable.isUseRecovery()) {
                long interval = recoverable.getRecoveryIntervalInMillis();
                if (interval <= 0) {
                    throw new IllegalArgumentException("AggregationRepository has recovery enabled and the RecoveryInterval option must be a positive number, was: " + interval);
                }

                // create a background recover thread to check every interval
                recoverService = camelContext.getExecutorServiceManager().newScheduledThreadPool(this, "AggregateRecoverChecker", 1);
                Runnable recoverTask = new RecoverTask(recoverable);
                LOG.info("Using RecoverableAggregationRepository by scheduling recover checker to run every {} millis.", interval);
                // use fixed delay so there is X interval between each run
                recoverService.scheduleWithFixedDelay(recoverTask, 1000L, interval, TimeUnit.MILLISECONDS);

                if (recoverable.getDeadLetterUri() != null) {
                    int max = recoverable.getMaximumRedeliveries();
                    if (max <= 0) {
                        throw new IllegalArgumentException("Option maximumRedeliveries must be a positive number, was: " + max);
                    }
                    LOG.info("After {} failed redelivery attempts Exchanges will be moved to deadLetterUri: {}", max, recoverable.getDeadLetterUri());

                    // dead letter uri must be a valid endpoint
                    Endpoint endpoint = camelContext.getEndpoint(recoverable.getDeadLetterUri());
                    if (endpoint == null) {
                        throw new NoSuchEndpointException(recoverable.getDeadLetterUri());
                    }
                    deadLetterProducerTemplate = camelContext.createProducerTemplate();
                }
            }
        }

        if (getCompletionInterval() > 0 && getCompletionTimeout() > 0) {
            throw new IllegalArgumentException("Only one of completionInterval or completionTimeout can be used, not both.");
        }
        if (getCompletionInterval() > 0) {
            LOG.info("Using CompletionInterval to run every {} millis.", getCompletionInterval());
            if (getTimeoutCheckerExecutorService() == null) {
                setTimeoutCheckerExecutorService(camelContext.getExecutorServiceManager().newScheduledThreadPool(this, AGGREGATE_TIMEOUT_CHECKER, 1));
                shutdownTimeoutCheckerExecutorService = true;
            }
            // trigger completion based on interval
            getTimeoutCheckerExecutorService().scheduleAtFixedRate(new AggregationIntervalTask(), getCompletionInterval(), getCompletionInterval(), TimeUnit.MILLISECONDS);
        }

        // start timeout service if its in use
        if (getCompletionTimeout() > 0 || getCompletionTimeoutExpression() != null) {
            LOG.info("Using CompletionTimeout to trigger after {} millis of inactivity.", getCompletionTimeout());
            if (getTimeoutCheckerExecutorService() == null) {
                setTimeoutCheckerExecutorService(camelContext.getExecutorServiceManager().newScheduledThreadPool(this, AGGREGATE_TIMEOUT_CHECKER, 1));
                shutdownTimeoutCheckerExecutorService = true;
            }
            // check for timed out aggregated messages once every second
            timeoutMap = new AggregationTimeoutMap(getTimeoutCheckerExecutorService(), getCompletionTimeoutCheckerInterval());
            // fill in existing timeout values from the aggregation repository, for example if a restart occurred, then we
            // need to re-establish the timeout map so timeout can trigger
            restoreTimeoutMapFromAggregationRepository();
            ServiceHelper.startService(timeoutMap);
        }

        if (aggregateController == null) {
            aggregateController = new DefaultAggregateController();
        }
        aggregateController.onStart(this);
    }

    @Override
    protected void doStop() throws Exception {
        // note: we cannot do doForceCompletionOnStop from this doStop method
        // as this is handled in the prepareShutdown method which is also invoked when stopping a route
        // and is better suited for preparing to shutdown than this doStop method is

        if (aggregateController != null) {
            aggregateController.onStop(this);
        }

        if (recoverService != null) {
            camelContext.getExecutorServiceManager().shutdown(recoverService);
        }

        if (shutdownTimeoutCheckerExecutorService && timeoutCheckerExecutorService != null) {
            camelContext.getExecutorServiceManager().shutdown(timeoutCheckerExecutorService);
            timeoutCheckerExecutorService = null;
            shutdownTimeoutCheckerExecutorService = false;
        }

        ServiceHelper.stopServices(timeoutMap, processor, deadLetterProducerTemplate);

        if (closedCorrelationKeys != null) {
            // it may be a service so stop it as well
            ServiceHelper.stopService(closedCorrelationKeys);
            closedCorrelationKeys.clear();
        }
        batchConsumerCorrelationKeys.clear();
        redeliveryState.clear();
    }

    @Override
    public void prepareShutdown(boolean suspendOnly, boolean forced) {
        // we are shutting down, so force completion if this option was enabled
        // but only do this when forced=false, as that is when we have chance to
        // send out new messages to be routed by Camel.
When forced=true, then 1524 // we have to shutdown in a hurry 1525 if (!forced && forceCompletionOnStop) { 1526 doForceCompletionOnStop(); 1527 } 1528 } 1529 1530 @Override 1531 public boolean deferShutdown(ShutdownRunningTask shutdownRunningTask) { 1532 // not in use 1533 return true; 1534 } 1535 1536 @Override 1537 public int getPendingExchangesSize() { 1538 if (completeAllOnStop) { 1539 // we want to regard all pending exchanges in the repo as inflight 1540 Set<String> keys = getAggregationRepository().getKeys(); 1541 return keys != null ? keys.size() : 0; 1542 } else { 1543 return 0; 1544 } 1545 } 1546 1547 private void doForceCompletionOnStop() { 1548 int expected = forceCompletionOfAllGroups(); 1549 1550 StopWatch watch = new StopWatch(); 1551 while (inProgressCompleteExchanges.size() > 0) { 1552 LOG.trace("Waiting for {} inflight exchanges to complete", getInProgressCompleteExchanges()); 1553 try { 1554 Thread.sleep(100); 1555 } catch (InterruptedException e) { 1556 // break out as we got interrupted such as the JVM terminating 1557 LOG.warn("Interrupted while waiting for {} inflight exchanges to complete.", getInProgressCompleteExchanges()); 1558 break; 1559 } 1560 } 1561 1562 if (expected > 0) { 1563 LOG.info("Forcing completion of all groups with {} exchanges completed in {}", expected, TimeUtils.printDuration(watch.taken())); 1564 } 1565 } 1566 1567 @Override 1568 protected void doShutdown() throws Exception { 1569 // shutdown aggregation repository and the strategy 1570 ServiceHelper.stopAndShutdownServices(aggregationRepository, aggregationStrategy); 1571 1572 // cleanup when shutting down 1573 inProgressCompleteExchanges.clear(); 1574 1575 if (shutdownExecutorService) { 1576 camelContext.getExecutorServiceManager().shutdownNow(executorService); 1577 } 1578 if (shutdownTimeoutCheckerExecutorService) { 1579 camelContext.getExecutorServiceManager().shutdownNow(timeoutCheckerExecutorService); 1580 timeoutCheckerExecutorService = null; 1581 } 1582 1583 
super.doShutdown();
    }

    /**
     * Forces completion of the group for the given correlation key, if one exists in the repository.
     *
     * @param key the correlation key
     * @return number of groups completed (0 or 1)
     */
    public int forceCompletionOfGroup(String key) {
        // must acquire the shared aggregation lock to be able to trigger force completion
        int total = 0;

        if (!optimisticLocking) {
            lock.lock();
        }
        try {
            Exchange exchange = aggregationRepository.get(camelContext, key);
            if (exchange != null) {
                total = 1;
                LOG.trace("Force completion triggered for correlation key: {}", key);
                // indicate it was completed by a force completion request
                exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "force");
                Exchange answer = onCompletion(key, exchange, exchange, false);
                if (answer != null) {
                    onSubmitCompletion(key, answer);
                }
            }
        } finally {
            if (!optimisticLocking) {
                lock.unlock();
            }
        }
        LOG.trace("Completed force completion of group {}", key);

        if (total > 0) {
            LOG.debug("Forcing completion of group {} with {} exchanges", key, total);
        }
        return total;
    }

    /**
     * Forces completion of all groups currently stored in the aggregation repository.
     *
     * @return number of groups triggered for completion (the repository key count at the time of the check)
     */
    public int forceCompletionOfAllGroups() {

        // only run if CamelContext has been fully started or is stopping
        boolean allow = camelContext.getStatus().isStarted() || camelContext.getStatus().isStopping();
        if (!allow) {
            LOG.warn("Cannot start force completion of all groups because CamelContext({}) has not been started", camelContext.getName());
            return 0;
        }

        LOG.trace("Starting force completion of all groups task");

        // trigger completion for all in the repository
        Set<String> keys = aggregationRepository.getKeys();

        int total = 0;
        if (keys != null && !keys.isEmpty()) {
            // must acquire the shared aggregation lock to be able to trigger force completion
            if (!optimisticLocking) {
                lock.lock();
            }
            total = keys.size();
            try {
                for (String key : keys) {
                    Exchange exchange = aggregationRepository.get(camelContext, key);
                    if (exchange != null) {
                        LOG.trace("Force completion triggered for correlation key: {}", key);
                        // indicate it was completed by a force completion request
                        exchange.setProperty(Exchange.AGGREGATED_COMPLETED_BY, "force");
                        Exchange answer = onCompletion(key, exchange, exchange, false);
                        if (answer != null) {
                            onSubmitCompletion(key, answer);
                        }
                    }
                }
            } finally {
                if (!optimisticLocking) {
                    lock.unlock();
                }
            }
        }
        LOG.trace("Completed force completion of all groups task");

        if (total > 0) {
            LOG.debug("Forcing completion of all groups with {} exchanges", total);
        }
        return total;
    }
}