001 /**
002 * Licensed to the Apache Software Foundation (ASF) under one or more
003 * contributor license agreements. See the NOTICE file distributed with
004 * this work for additional information regarding copyright ownership.
005 * The ASF licenses this file to You under the Apache License, Version 2.0
006 * (the "License"); you may not use this file except in compliance with
007 * the License. You may obtain a copy of the License at
008 *
009 * http://www.apache.org/licenses/LICENSE-2.0
010 *
011 * Unless required by applicable law or agreed to in writing, software
012 * distributed under the License is distributed on an "AS IS" BASIS,
013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
014 * See the License for the specific language governing permissions and
015 * limitations under the License.
016 */
017 package org.apache.camel.processor;
018
019 import java.util.concurrent.Callable;
020 import java.util.concurrent.RejectedExecutionException;
021 import java.util.concurrent.ScheduledExecutorService;
022 import java.util.concurrent.TimeUnit;
023
024 import org.apache.camel.AsyncCallback;
025 import org.apache.camel.AsyncProcessor;
026 import org.apache.camel.CamelContext;
027 import org.apache.camel.Exchange;
028 import org.apache.camel.LoggingLevel;
029 import org.apache.camel.Message;
030 import org.apache.camel.Predicate;
031 import org.apache.camel.Processor;
032 import org.apache.camel.model.OnExceptionDefinition;
033 import org.apache.camel.spi.SubUnitOfWorkCallback;
034 import org.apache.camel.spi.UnitOfWork;
035 import org.apache.camel.util.AsyncProcessorConverterHelper;
036 import org.apache.camel.util.AsyncProcessorHelper;
037 import org.apache.camel.util.CamelContextHelper;
038 import org.apache.camel.util.CamelLogger;
039 import org.apache.camel.util.EventHelper;
040 import org.apache.camel.util.ExchangeHelper;
041 import org.apache.camel.util.MessageHelper;
042 import org.apache.camel.util.ObjectHelper;
043 import org.apache.camel.util.ServiceHelper;
044
045 /**
046 * Base redeliverable error handler that also supports a final dead letter queue in case
047 * all redelivery attempts fail.
048 * <p/>
049 * This implementation should contain all the error handling logic and the sub classes
050 * should only configure it according to what they support.
051 *
052 * @version
053 */
054 public abstract class RedeliveryErrorHandler extends ErrorHandlerSupport implements AsyncProcessor {
055
056 protected ScheduledExecutorService executorService;
057 protected final CamelContext camelContext;
058 protected final Processor deadLetter;
059 protected final String deadLetterUri;
060 protected final Processor output;
061 protected final AsyncProcessor outputAsync;
062 protected final Processor redeliveryProcessor;
063 protected final RedeliveryPolicy redeliveryPolicy;
064 protected final Predicate retryWhilePolicy;
065 protected final CamelLogger logger;
066 protected final boolean useOriginalMessagePolicy;
067 protected boolean redeliveryEnabled;
068
069 /**
070 * Contains the current redelivery data
071 */
072 protected class RedeliveryData {
073 Exchange original;
074 boolean sync = true;
075 int redeliveryCounter;
076 long redeliveryDelay;
077 Predicate retryWhilePredicate = retryWhilePolicy;
078 boolean redeliverFromSync;
079
080 // default behavior which can be overloaded on a per exception basis
081 RedeliveryPolicy currentRedeliveryPolicy = redeliveryPolicy;
082 Processor deadLetterProcessor = deadLetter;
083 Processor failureProcessor;
084 Processor onRedeliveryProcessor = redeliveryProcessor;
085 Predicate handledPredicate = getDefaultHandledPredicate();
086 Predicate continuedPredicate;
087 boolean useOriginalInMessage = useOriginalMessagePolicy;
088 boolean asyncDelayedRedelivery = redeliveryPolicy.isAsyncDelayedRedelivery();
089 }
090
091 /**
092 * Tasks which performs asynchronous redelivery attempts, and being triggered by a
093 * {@link java.util.concurrent.ScheduledExecutorService} to avoid having any threads blocking if a task
094 * has to be delayed before a redelivery attempt is performed.
095 */
096 private class AsyncRedeliveryTask implements Callable<Boolean> {
097
098 private final Exchange exchange;
099 private final AsyncCallback callback;
100 private final RedeliveryData data;
101
102 public AsyncRedeliveryTask(Exchange exchange, AsyncCallback callback, RedeliveryData data) {
103 this.exchange = exchange;
104 this.callback = callback;
105 this.data = data;
106 }
107
108 public Boolean call() throws Exception {
109 // prepare for redelivery
110 prepareExchangeForRedelivery(exchange, data);
111
112 // letting onRedeliver be executed at first
113 deliverToOnRedeliveryProcessor(exchange, data);
114
115 if (log.isTraceEnabled()) {
116 log.trace("Redelivering exchangeId: {} -> {} for Exchange: {}", new Object[]{exchange.getExchangeId(), outputAsync, exchange});
117 }
118
119 // emmit event we are doing redelivery
120 EventHelper.notifyExchangeRedelivery(exchange.getContext(), exchange, data.redeliveryCounter);
121
122 // process the exchange (also redelivery)
123 boolean sync;
124 if (data.redeliverFromSync) {
125 // this redelivery task was scheduled from synchronous, which we forced to be asynchronous from
126 // this error handler, which means we have to invoke the callback with false, to have the callback
127 // be notified when we are done
128 sync = AsyncProcessorHelper.process(outputAsync, exchange, new AsyncCallback() {
129 public void done(boolean doneSync) {
130 log.trace("Redelivering exchangeId: {} done sync: {}", exchange.getExchangeId(), doneSync);
131
132 // mark we are in sync mode now
133 data.sync = false;
134
135 // only process if the exchange hasn't failed
136 // and it has not been handled by the error processor
137 if (isDone(exchange)) {
138 callback.done(false);
139 return;
140 }
141
142 // error occurred so loop back around which we do by invoking the processAsyncErrorHandler
143 processAsyncErrorHandler(exchange, callback, data);
144 }
145 });
146 } else {
147 // this redelivery task was scheduled from asynchronous, which means we should only
148 // handle when the asynchronous task was done
149 sync = AsyncProcessorHelper.process(outputAsync, exchange, new AsyncCallback() {
150 public void done(boolean doneSync) {
151 log.trace("Redelivering exchangeId: {} done sync: {}", exchange.getExchangeId(), doneSync);
152
153 // this callback should only handle the async case
154 if (doneSync) {
155 return;
156 }
157
158 // mark we are in async mode now
159 data.sync = false;
160
161 // only process if the exchange hasn't failed
162 // and it has not been handled by the error processor
163 if (isDone(exchange)) {
164 callback.done(doneSync);
165 return;
166 }
167 // error occurred so loop back around which we do by invoking the processAsyncErrorHandler
168 processAsyncErrorHandler(exchange, callback, data);
169 }
170 });
171 }
172
173 return sync;
174 }
175 }
176
177 public RedeliveryErrorHandler(CamelContext camelContext, Processor output, CamelLogger logger,
178 Processor redeliveryProcessor, RedeliveryPolicy redeliveryPolicy, Processor deadLetter,
179 String deadLetterUri, boolean useOriginalMessagePolicy, Predicate retryWhile, ScheduledExecutorService executorService) {
180
181 ObjectHelper.notNull(camelContext, "CamelContext", this);
182 ObjectHelper.notNull(redeliveryPolicy, "RedeliveryPolicy", this);
183
184 this.camelContext = camelContext;
185 this.redeliveryProcessor = redeliveryProcessor;
186 this.deadLetter = deadLetter;
187 this.output = output;
188 this.outputAsync = AsyncProcessorConverterHelper.convert(output);
189 this.redeliveryPolicy = redeliveryPolicy;
190 this.logger = logger;
191 this.deadLetterUri = deadLetterUri;
192 this.useOriginalMessagePolicy = useOriginalMessagePolicy;
193 this.retryWhilePolicy = retryWhile;
194 this.executorService = executorService;
195 }
196
    /**
     * Whether this error handler supports transacted exchanges; this base
     * implementation does not (sub classes may override).
     */
    public boolean supportTransacted() {
        return false;
    }
200
201 @Override
202 public boolean isRunAllowed() {
203 // determine if we can still run, or the camel context is forcing a shutdown
204 boolean forceShutdown = camelContext.getShutdownStrategy().forceShutdown(this);
205 if (forceShutdown) {
206 log.trace("Run not allowed as ShutdownStrategy is forcing shutting down");
207 }
208 return !forceShutdown && super.isRunAllowed();
209 }
210
211 public void process(Exchange exchange) throws Exception {
212 if (output == null) {
213 // no output then just return
214 return;
215 }
216 AsyncProcessorHelper.process(this, exchange);
217 }
218
    /**
     * Asynchronous entry point: processes the exchange with fresh redelivery state.
     *
     * @return <tt>true</tt> if processing completed synchronously, <tt>false</tt> otherwise
     */
    public boolean process(Exchange exchange, final AsyncCallback callback) {
        return processErrorHandler(exchange, callback, new RedeliveryData());
    }
222
223 /**
224 * Process the exchange using redelivery error handling.
225 */
226 protected boolean processErrorHandler(final Exchange exchange, final AsyncCallback callback, final RedeliveryData data) {
227
228 // do a defensive copy of the original Exchange, which is needed for redelivery so we can ensure the
229 // original Exchange is being redelivered, and not a mutated Exchange
230 data.original = defensiveCopyExchangeIfNeeded(exchange);
231
232 // use looping to have redelivery attempts
233 while (true) {
234
235 // can we still run
236 if (!isRunAllowed()) {
237 log.trace("Run not allowed, will reject executing exchange: {}", exchange);
238 if (exchange.getException() == null) {
239 exchange.setException(new RejectedExecutionException());
240 }
241 // we cannot process so invoke callback
242 callback.done(data.sync);
243 return data.sync;
244 }
245
246 // did previous processing cause an exception?
247 boolean handle = shouldHandleException(exchange);
248 if (handle) {
249 handleException(exchange, data);
250 }
251
252 // compute if we are exhausted or not
253 boolean exhausted = isExhausted(exchange, data);
254 if (exhausted) {
255 Processor target = null;
256 boolean deliver = true;
257
258 // the unit of work may have an optional callback associated we need to leverage
259 SubUnitOfWorkCallback uowCallback = exchange.getUnitOfWork().getSubUnitOfWorkCallback();
260 if (uowCallback != null) {
261 // signal to the callback we are exhausted
262 uowCallback.onExhausted(exchange);
263 // do not deliver to the failure processor as its been handled by the callback instead
264 deliver = false;
265 }
266
267 if (deliver) {
268 // should deliver to failure processor (either from onException or the dead letter channel)
269 target = data.failureProcessor != null ? data.failureProcessor : data.deadLetterProcessor;
270 }
271 // we should always invoke the deliverToFailureProcessor as it prepares, logs and does a fair
272 // bit of work for exhausted exchanges (its only the target processor which may be null if handled by a savepoint)
273 boolean sync = deliverToFailureProcessor(target, exchange, data, callback);
274 // we are breaking out
275 return sync;
276 }
277
278 if (data.redeliveryCounter > 0) {
279 // calculate delay
280 data.redeliveryDelay = determineRedeliveryDelay(exchange, data.currentRedeliveryPolicy, data.redeliveryDelay, data.redeliveryCounter);
281
282 if (data.redeliveryDelay > 0) {
283 // okay there is a delay so create a scheduled task to have it executed in the future
284
285 if (data.currentRedeliveryPolicy.isAsyncDelayedRedelivery() && !exchange.isTransacted()) {
286 // let the RedeliverTask be the logic which tries to redeliver the Exchange which we can used a scheduler to
287 // have it being executed in the future, or immediately
288 // we are continuing asynchronously
289
290 // mark we are routing async from now and that this redelivery task came from a synchronous routing
291 data.sync = false;
292 data.redeliverFromSync = true;
293 AsyncRedeliveryTask task = new AsyncRedeliveryTask(exchange, callback, data);
294
295 // schedule the redelivery task
296 if (log.isTraceEnabled()) {
297 log.trace("Scheduling redelivery task to run in {} millis for exchangeId: {}", data.redeliveryDelay, exchange.getExchangeId());
298 }
299 executorService.schedule(task, data.redeliveryDelay, TimeUnit.MILLISECONDS);
300
301 return false;
302 } else {
303 // async delayed redelivery was disabled or we are transacted so we must be synchronous
304 // as the transaction manager requires to execute in the same thread context
305 try {
306 data.currentRedeliveryPolicy.sleep(data.redeliveryDelay);
307 } catch (InterruptedException e) {
308 // we was interrupted so break out
309 exchange.setException(e);
310 // mark the exchange to stop continue routing when interrupted
311 // as we do not want to continue routing (for example a task has been cancelled)
312 exchange.setProperty(Exchange.ROUTE_STOP, Boolean.TRUE);
313 callback.done(data.sync);
314 return data.sync;
315 }
316 }
317 }
318
319 // prepare for redelivery
320 prepareExchangeForRedelivery(exchange, data);
321
322 // letting onRedeliver be executed
323 deliverToOnRedeliveryProcessor(exchange, data);
324
325 // emmit event we are doing redelivery
326 EventHelper.notifyExchangeRedelivery(exchange.getContext(), exchange, data.redeliveryCounter);
327 }
328
329 // process the exchange (also redelivery)
330 boolean sync = AsyncProcessorHelper.process(outputAsync, exchange, new AsyncCallback() {
331 public void done(boolean sync) {
332 // this callback should only handle the async case
333 if (sync) {
334 return;
335 }
336
337 // mark we are in async mode now
338 data.sync = false;
339
340 // if we are done then notify callback and exit
341 if (isDone(exchange)) {
342 callback.done(sync);
343 return;
344 }
345
346 // error occurred so loop back around which we do by invoking the processAsyncErrorHandler
347 // method which takes care of this in a asynchronous manner
348 processAsyncErrorHandler(exchange, callback, data);
349 }
350 });
351
352 if (!sync) {
353 // the remainder of the Exchange is being processed asynchronously so we should return
354 return false;
355 }
356 // we continue to route synchronously
357
358 // if we are done then notify callback and exit
359 boolean done = isDone(exchange);
360 if (done) {
361 callback.done(true);
362 return true;
363 }
364
365 // error occurred so loop back around.....
366 }
367 }
368
369 /**
370 * <p>Determines the redelivery delay time by first inspecting the Message header {@link Exchange#REDELIVERY_DELAY}
371 * and if not present, defaulting to {@link RedeliveryPolicy#calculateRedeliveryDelay(long, int)}</p>
372 *
373 * <p>In order to prevent manipulation of the RedeliveryData state, the values of {@link RedeliveryData#redeliveryDelay}
374 * and {@link RedeliveryData#redeliveryCounter} are copied in.</p>
375 *
376 * @param exchange The current exchange in question.
377 * @param redeliveryPolicy The RedeliveryPolicy to use in the calculation.
378 * @param redeliveryDelay The default redelivery delay from RedeliveryData
379 * @param redeliveryCounter The redeliveryCounter
380 * @return The time to wait before the next redelivery.
381 */
382 protected long determineRedeliveryDelay(Exchange exchange, RedeliveryPolicy redeliveryPolicy, long redeliveryDelay, int redeliveryCounter) {
383 Message message = exchange.getIn();
384 Long delay = message.getHeader(Exchange.REDELIVERY_DELAY, Long.class);
385 if (delay == null) {
386 delay = redeliveryPolicy.calculateRedeliveryDelay(redeliveryDelay, redeliveryCounter);
387 log.debug("Redelivery delay calculated as {}", delay);
388 } else {
389 log.debug("Redelivery delay is {} from Message Header [{}]", delay, Exchange.REDELIVERY_DELAY);
390 }
391 return delay;
392 }
393
394 /**
395 * This logic is only executed if we have to retry redelivery asynchronously, which have to be done from the callback.
396 * <p/>
397 * And therefore the logic is a bit different than the synchronous <tt>processErrorHandler</tt> method which can use
398 * a loop based redelivery technique. However this means that these two methods in overall have to be in <b>sync</b>
399 * in terms of logic.
400 */
401 protected void processAsyncErrorHandler(final Exchange exchange, final AsyncCallback callback, final RedeliveryData data) {
402 // can we still run
403 if (!isRunAllowed()) {
404 log.trace("Run not allowed, will reject executing exchange: {}", exchange);
405 if (exchange.getException() == null) {
406 exchange.setException(new RejectedExecutionException());
407 }
408 callback.done(data.sync);
409 return;
410 }
411
412 // did previous processing cause an exception?
413 boolean handle = shouldHandleException(exchange);
414 if (handle) {
415 handleException(exchange, data);
416 }
417
418 // compute if we are exhausted or not
419 boolean exhausted = isExhausted(exchange, data);
420 if (exhausted) {
421 Processor target = null;
422 boolean deliver = true;
423
424 // the unit of work may have an optional callback associated we need to leverage
425 SubUnitOfWorkCallback uowCallback = exchange.getUnitOfWork().getSubUnitOfWorkCallback();
426 if (uowCallback != null) {
427 // signal to the callback we are exhausted
428 uowCallback.onExhausted(exchange);
429 // do not deliver to the failure processor as its been handled by the callback instead
430 deliver = false;
431 }
432
433 if (deliver) {
434 // should deliver to failure processor (either from onException or the dead letter channel)
435 target = data.failureProcessor != null ? data.failureProcessor : data.deadLetterProcessor;
436 }
437 // we should always invoke the deliverToFailureProcessor as it prepares, logs and does a fair
438 // bit of work for exhausted exchanges (its only the target processor which may be null if handled by a savepoint)
439 deliverToFailureProcessor(target, exchange, data, callback);
440 // we are breaking out
441 return;
442 }
443
444 if (data.redeliveryCounter > 0) {
445 // let the RedeliverTask be the logic which tries to redeliver the Exchange which we can used a scheduler to
446 // have it being executed in the future, or immediately
447 // Note: the data.redeliverFromSync should be kept as is, in case it was enabled previously
448 // to ensure the callback will continue routing from where we left
449 AsyncRedeliveryTask task = new AsyncRedeliveryTask(exchange, callback, data);
450
451 // calculate the redelivery delay
452 data.redeliveryDelay = data.currentRedeliveryPolicy.calculateRedeliveryDelay(data.redeliveryDelay, data.redeliveryCounter);
453 if (data.redeliveryDelay > 0) {
454 // schedule the redelivery task
455 if (log.isTraceEnabled()) {
456 log.trace("Scheduling redelivery task to run in {} millis for exchangeId: {}", data.redeliveryDelay, exchange.getExchangeId());
457 }
458 executorService.schedule(task, data.redeliveryDelay, TimeUnit.MILLISECONDS);
459 } else {
460 // execute the task immediately
461 executorService.submit(task);
462 }
463 }
464 }
465
466 /**
467 * Performs a defensive copy of the exchange if needed
468 *
469 * @param exchange the exchange
470 * @return the defensive copy, or <tt>null</tt> if not needed (redelivery is not enabled).
471 */
472 protected Exchange defensiveCopyExchangeIfNeeded(Exchange exchange) {
473 // only do a defensive copy if redelivery is enabled
474 if (redeliveryEnabled) {
475 return ExchangeHelper.createCopy(exchange, true);
476 } else {
477 return null;
478 }
479 }
480
481 /**
482 * Strategy whether the exchange has an exception that we should try to handle.
483 * <p/>
484 * Standard implementations should just look for an exception.
485 */
486 protected boolean shouldHandleException(Exchange exchange) {
487 return exchange.getException() != null;
488 }
489
490 /**
491 * Strategy to determine if the exchange is done so we can continue
492 */
493 protected boolean isDone(Exchange exchange) {
494 boolean answer = isCancelledOrInterrupted(exchange);
495
496 // only done if the exchange hasn't failed
497 // and it has not been handled by the failure processor
498 // or we are exhausted
499 if (!answer) {
500 answer = exchange.getException() == null
501 || ExchangeHelper.isFailureHandled(exchange)
502 || ExchangeHelper.isRedeliveryExhausted(exchange);
503 }
504
505 log.trace("Is exchangeId: {} done? {}", exchange.getExchangeId(), answer);
506 return answer;
507 }
508
509 /**
510 * Strategy to determine if the exchange was cancelled or interrupted
511 */
512 protected boolean isCancelledOrInterrupted(Exchange exchange) {
513 boolean answer = false;
514
515 if (ExchangeHelper.isInterrupted(exchange)) {
516 // mark the exchange to stop continue routing when interrupted
517 // as we do not want to continue routing (for example a task has been cancelled)
518 exchange.setProperty(Exchange.ROUTE_STOP, Boolean.TRUE);
519 answer = true;
520 }
521
522 log.trace("Is exchangeId: {} interrupted? {}", exchange.getExchangeId(), answer);
523 return answer;
524 }
525
526 /**
527 * Returns the output processor
528 */
529 public Processor getOutput() {
530 return output;
531 }
532
533 /**
534 * Returns the dead letter that message exchanges will be sent to if the
535 * redelivery attempts fail
536 */
537 public Processor getDeadLetter() {
538 return deadLetter;
539 }
540
    /**
     * Returns the uri of the dead letter endpoint, or <tt>null</tt> if none configured.
     */
    public String getDeadLetterUri() {
        return deadLetterUri;
    }
544
    /**
     * Whether the original IN message should be restored when handling a failed exchange.
     */
    public boolean isUseOriginalMessagePolicy() {
        return useOriginalMessagePolicy;
    }
548
    /**
     * Returns the globally configured redelivery policy (never <tt>null</tt>).
     */
    public RedeliveryPolicy getRedeliveryPolicy() {
        return redeliveryPolicy;
    }
552
    /**
     * Returns the logger used for failed delivery messages, or <tt>null</tt> if logging is disabled.
     */
    public CamelLogger getLogger() {
        return logger;
    }
556
    /**
     * The default handled predicate; <tt>null</tt> means errors are not handled by default
     * (sub classes may override).
     */
    protected Predicate getDefaultHandledPredicate() {
        // default is to not handle errors
        return null;
    }
561
562 protected void prepareExchangeForContinue(Exchange exchange, RedeliveryData data) {
563 Exception caught = exchange.getException();
564
565 // we continue so clear any exceptions
566 exchange.setException(null);
567 // clear rollback flags
568 exchange.setProperty(Exchange.ROLLBACK_ONLY, null);
569 // reset cached streams so they can be read again
570 MessageHelper.resetStreamCache(exchange.getIn());
571
572 // its continued then remove traces of redelivery attempted and caught exception
573 exchange.getIn().removeHeader(Exchange.REDELIVERED);
574 exchange.getIn().removeHeader(Exchange.REDELIVERY_COUNTER);
575 exchange.getIn().removeHeader(Exchange.REDELIVERY_MAX_COUNTER);
576 exchange.removeProperty(Exchange.FAILURE_HANDLED);
577 // keep the Exchange.EXCEPTION_CAUGHT as property so end user knows the caused exception
578
579 // create log message
580 String msg = "Failed delivery for " + ExchangeHelper.logIds(exchange);
581 msg = msg + ". Exhausted after delivery attempt: " + data.redeliveryCounter + " caught: " + caught;
582 msg = msg + ". Handled and continue routing.";
583
584 // log that we failed but want to continue
585 logFailedDelivery(false, false, true, exchange, msg, data, null);
586 }
587
    /**
     * Prepares the exchange for a redelivery attempt: clears the exception and rollback
     * markers, restores the IN message from the defensive copy taken at the start, and
     * preserves the redelivery bookkeeping headers across the restore.
     *
     * @throws IllegalStateException if redelivery is not enabled on this error handler
     */
    protected void prepareExchangeForRedelivery(Exchange exchange, RedeliveryData data) {
        if (!redeliveryEnabled) {
            throw new IllegalStateException("Redelivery is not enabled on " + this + ". Make sure you have configured the error handler properly.");
        }
        // there must be a defensive copy of the exchange
        ObjectHelper.notNull(data.original, "Defensive copy of Exchange is null", this);

        // okay we will give it another go so clear the exception so we can try again
        exchange.setException(null);

        // clear rollback flags
        exchange.setProperty(Exchange.ROLLBACK_ONLY, null);

        // TODO: We may want to store these as state on RedeliveryData so we keep them in case end user messes with Exchange
        // and then put these on the exchange when doing a redelivery / fault processor

        // preserve these headers, as copying from the original below would otherwise wipe them
        Integer redeliveryCounter = exchange.getIn().getHeader(Exchange.REDELIVERY_COUNTER, Integer.class);
        Integer redeliveryMaxCounter = exchange.getIn().getHeader(Exchange.REDELIVERY_MAX_COUNTER, Integer.class);
        Boolean redelivered = exchange.getIn().getHeader(Exchange.REDELIVERED, Boolean.class);

        // we are redelivering so copy from original back to exchange
        exchange.getIn().copyFrom(data.original.getIn());
        exchange.setOut(null);
        // reset cached streams so they can be read again
        MessageHelper.resetStreamCache(exchange.getIn());

        // put back headers
        if (redeliveryCounter != null) {
            exchange.getIn().setHeader(Exchange.REDELIVERY_COUNTER, redeliveryCounter);
        }
        if (redeliveryMaxCounter != null) {
            exchange.getIn().setHeader(Exchange.REDELIVERY_MAX_COUNTER, redeliveryMaxCounter);
        }
        if (redelivered != null) {
            exchange.getIn().setHeader(Exchange.REDELIVERED, redelivered);
        }
    }
626
    /**
     * Handles the current exception on the exchange: stores it as the caught exception,
     * applies any matching {@link OnExceptionDefinition} overrides onto the redelivery
     * state, logs the failed delivery (unless already failure handled), and increments
     * the redelivery counter.
     */
    protected void handleException(Exchange exchange, RedeliveryData data) {
        Exception e = exchange.getException();

        // store the original caused exception in a property, so we can restore it later
        exchange.setProperty(Exchange.EXCEPTION_CAUGHT, e);

        // find the error handler to use (if any)
        OnExceptionDefinition exceptionPolicy = getExceptionPolicy(exchange, e);
        if (exceptionPolicy != null) {
            // override the default redelivery behavior with the per-exception configuration
            data.currentRedeliveryPolicy = exceptionPolicy.createRedeliveryPolicy(exchange.getContext(), data.currentRedeliveryPolicy);
            data.handledPredicate = exceptionPolicy.getHandledPolicy();
            data.continuedPredicate = exceptionPolicy.getContinuedPolicy();
            data.retryWhilePredicate = exceptionPolicy.getRetryWhilePolicy();
            data.useOriginalInMessage = exceptionPolicy.isUseOriginalMessage();
            data.asyncDelayedRedelivery = exceptionPolicy.isAsyncDelayedRedelivery(exchange.getContext());

            // route specific failure handler?
            Processor processor = null;
            UnitOfWork uow = exchange.getUnitOfWork();
            if (uow != null && uow.getRouteContext() != null) {
                String routeId = uow.getRouteContext().getRoute().getId();
                processor = exceptionPolicy.getErrorHandler(routeId);
            } else if (!exceptionPolicy.getErrorHandlers().isEmpty()) {
                // note this should really not happen, but we have this code as a fail safe
                // to be backwards compatible with the old behavior
                log.warn("Cannot determine current route from Exchange with id: {}, will fallback and use first error handler.", exchange.getExchangeId());
                processor = exceptionPolicy.getErrorHandlers().iterator().next();
            }
            if (processor != null) {
                data.failureProcessor = processor;
            }

            // route specific on redelivery?
            processor = exceptionPolicy.getOnRedelivery();
            if (processor != null) {
                data.onRedeliveryProcessor = processor;
            }
        }

        // only log if not failure handled or not an exhausted unit of work
        if (!ExchangeHelper.isFailureHandled(exchange) && !ExchangeHelper.isUnitOfWorkExhausted(exchange)) {
            String msg = "Failed delivery for " + ExchangeHelper.logIds(exchange)
                + ". On delivery attempt: " + data.redeliveryCounter + " caught: " + e;
            logFailedDelivery(true, false, false, exchange, msg, data, e);
        }

        data.redeliveryCounter = incrementRedeliveryCounter(exchange, e, data);
    }
675
676 /**
677 * Gives an optional configure redelivery processor a chance to process before the Exchange
678 * will be redelivered. This can be used to alter the Exchange.
679 */
680 protected void deliverToOnRedeliveryProcessor(final Exchange exchange, final RedeliveryData data) {
681 if (data.onRedeliveryProcessor == null) {
682 return;
683 }
684
685 if (log.isTraceEnabled()) {
686 log.trace("Redelivery processor {} is processing Exchange: {} before its redelivered",
687 data.onRedeliveryProcessor, exchange);
688 }
689
690 // run this synchronously as its just a Processor
691 try {
692 data.onRedeliveryProcessor.process(exchange);
693 } catch (Throwable e) {
694 exchange.setException(e);
695 }
696 log.trace("Redelivery processor done");
697 }
698
699 /**
700 * All redelivery attempts failed so move the exchange to the dead letter queue
701 */
702 protected boolean deliverToFailureProcessor(final Processor processor, final Exchange exchange,
703 final RedeliveryData data, final AsyncCallback callback) {
704 boolean sync = true;
705
706 Exception caught = exchange.getException();
707
708 // we did not success with the redelivery so now we let the failure processor handle it
709 // clear exception as we let the failure processor handle it
710 exchange.setException(null);
711
712 final boolean shouldHandle = shouldHandled(exchange, data);
713 final boolean shouldContinue = shouldContinue(exchange, data);
714 // regard both handled or continued as being handled
715 boolean handled = false;
716
717 if (shouldHandle || shouldContinue) {
718 // its handled then remove traces of redelivery attempted
719 exchange.getIn().removeHeader(Exchange.REDELIVERED);
720 exchange.getIn().removeHeader(Exchange.REDELIVERY_COUNTER);
721 exchange.getIn().removeHeader(Exchange.REDELIVERY_MAX_COUNTER);
722 exchange.removeProperty(Exchange.REDELIVERY_EXHAUSTED);
723
724 // and remove traces of rollback only and uow exhausted markers
725 exchange.removeProperty(Exchange.ROLLBACK_ONLY);
726 exchange.removeProperty(Exchange.UNIT_OF_WORK_EXHAUSTED);
727
728 handled = true;
729 } else {
730 // must decrement the redelivery counter as we didn't process the redelivery but is
731 // handling by the failure handler. So we must -1 to not let the counter be out-of-sync
732 decrementRedeliveryCounter(exchange);
733 }
734
735 // is the a failure processor to process the Exchange
736 if (processor != null) {
737
738 // prepare original IN body if it should be moved instead of current body
739 if (data.useOriginalInMessage) {
740 log.trace("Using the original IN message instead of current");
741 Message original = exchange.getUnitOfWork().getOriginalInMessage();
742 exchange.setIn(original);
743 if (exchange.hasOut()) {
744 log.trace("Removing the out message to avoid some uncertain behavior");
745 exchange.setOut(null);
746 }
747 }
748
749 // reset cached streams so they can be read again
750 MessageHelper.resetStreamCache(exchange.getIn());
751
752 log.trace("Failure processor {} is processing Exchange: {}", processor, exchange);
753
754 // store the last to endpoint as the failure endpoint
755 exchange.setProperty(Exchange.FAILURE_ENDPOINT, exchange.getProperty(Exchange.TO_ENDPOINT));
756
757 // the failure processor could also be asynchronous
758 AsyncProcessor afp = AsyncProcessorConverterHelper.convert(processor);
759 sync = AsyncProcessorHelper.process(afp, exchange, new AsyncCallback() {
760 public void done(boolean sync) {
761 log.trace("Failure processor done: {} processing Exchange: {}", processor, exchange);
762 try {
763 prepareExchangeAfterFailure(exchange, data, shouldHandle, shouldContinue);
764 // fire event as we had a failure processor to handle it, which there is a event for
765 boolean deadLetterChannel = processor == data.deadLetterProcessor && data.deadLetterProcessor != null;
766 EventHelper.notifyExchangeFailureHandled(exchange.getContext(), exchange, processor, deadLetterChannel);
767 } finally {
768 // if the fault was handled asynchronously, this should be reflected in the callback as well
769 data.sync &= sync;
770 callback.done(data.sync);
771 }
772 }
773 });
774 } else {
775 try {
776 // no processor but we need to prepare after failure as well
777 prepareExchangeAfterFailure(exchange, data, shouldHandle, shouldContinue);
778 } finally {
779 // callback we are done
780 callback.done(data.sync);
781 }
782 }
783
784 // create log message
785 String msg = "Failed delivery for " + ExchangeHelper.logIds(exchange);
786 msg = msg + ". Exhausted after delivery attempt: " + data.redeliveryCounter + " caught: " + caught;
787 if (processor != null) {
788 msg = msg + ". Processed by failure processor: " + processor;
789 }
790
791 // log that we failed delivery as we are exhausted
792 logFailedDelivery(false, handled, false, exchange, msg, data, null);
793
794 return sync;
795 }
796
    /**
     * Prepares the exchange after the failure processor has run: marks the exchange as
     * failure handled and records whether it was handled, continued, or left as failed
     * (restoring the caught exception and failure endpoint in the latter case).
     * <p/>
     * An {@link Exchange#ERRORHANDLER_HANDLED} decision made earlier (e.g. by a nested
     * error handler) is honored and not overwritten.
     */
    protected void prepareExchangeAfterFailure(final Exchange exchange, final RedeliveryData data,
            final boolean shouldHandle, final boolean shouldContinue) {
        // we could not process the exchange so we let the failure processor handled it
        ExchangeHelper.setFailureHandled(exchange);

        // honor if already set a handling
        boolean alreadySet = exchange.getProperty(Exchange.ERRORHANDLER_HANDLED) != null;
        if (alreadySet) {
            boolean handled = exchange.getProperty(Exchange.ERRORHANDLER_HANDLED, Boolean.class);
            log.trace("This exchange has already been marked for handling: {}", handled);
            if (handled) {
                exchange.setException(null);
            } else {
                // exception not handled, put exception back in the exchange
                exchange.setException(exchange.getProperty(Exchange.EXCEPTION_CAUGHT, Exception.class));
                // and put failure endpoint back as well
                exchange.setProperty(Exchange.FAILURE_ENDPOINT, exchange.getProperty(Exchange.TO_ENDPOINT));
            }
            return;
        }

        if (shouldHandle) {
            log.trace("This exchange is handled so its marked as not failed: {}", exchange);
            exchange.setProperty(Exchange.ERRORHANDLER_HANDLED, Boolean.TRUE);
        } else if (shouldContinue) {
            log.trace("This exchange is continued: {}", exchange);
            // okay we want to continue then prepare the exchange for that as well
            prepareExchangeForContinue(exchange, data);
        } else {
            log.trace("This exchange is not handled or continued so its marked as failed: {}", exchange);
            // exception not handled, put exception back in the exchange
            exchange.setProperty(Exchange.ERRORHANDLER_HANDLED, Boolean.FALSE);
            exchange.setException(exchange.getProperty(Exchange.EXCEPTION_CAUGHT, Exception.class));
            // and put failure endpoint back as well
            exchange.setProperty(Exchange.FAILURE_ENDPOINT, exchange.getProperty(Exchange.TO_ENDPOINT));
        }
    }
834
    /**
     * Logs a failed delivery (retry attempt, exhaustion, or rollback) at the level
     * and stack-trace detail dictated by the current redelivery policy. Intended
     * rollbacks are capped at WARN even if the policy asks for ERROR.
     *
     * @param shouldRedeliver whether a redelivery attempt will follow this failure
     * @param handled whether the exception was marked as handled
     * @param continued whether routing continues despite the exception
     * @param exchange the current exchange
     * @param message the pre-built log message
     * @param data the redelivery data holding the active policy
     * @param e the caused exception, or <tt>null</tt> to fall back to the exchange's caught exception
     */
    private void logFailedDelivery(boolean shouldRedeliver, boolean handled, boolean continued, Exchange exchange, String message, RedeliveryData data, Throwable e) {
        // no logger configured means logging is disabled entirely
        if (logger == null) {
            return;
        }

        if (!exchange.isRollbackOnly()) {
            // if we should not rollback, then check whether logging is enabled
            if (handled && !data.currentRedeliveryPolicy.isLogHandled()) {
                // do not log handled
                return;
            }

            if (continued && !data.currentRedeliveryPolicy.isLogContinued()) {
                // do not log continued
                return;
            }

            if (shouldRedeliver && !data.currentRedeliveryPolicy.isLogRetryAttempted()) {
                // do not log retry attempts
                return;
            }

            if (!shouldRedeliver && !data.currentRedeliveryPolicy.isLogExhausted()) {
                // do not log exhausted
                return;
            }
        }

        // pick log level and stack-trace flag: rollback and exhaustion use the
        // exhausted settings, a pending redelivery uses the retry settings
        LoggingLevel newLogLevel;
        boolean logStackTrace;
        if (exchange.isRollbackOnly()) {
            newLogLevel = data.currentRedeliveryPolicy.getRetriesExhaustedLogLevel();
            logStackTrace = data.currentRedeliveryPolicy.isLogStackTrace();
        } else if (shouldRedeliver) {
            newLogLevel = data.currentRedeliveryPolicy.getRetryAttemptedLogLevel();
            logStackTrace = data.currentRedeliveryPolicy.isLogRetryStackTrace();
        } else {
            newLogLevel = data.currentRedeliveryPolicy.getRetriesExhaustedLogLevel();
            logStackTrace = data.currentRedeliveryPolicy.isLogStackTrace();
        }
        // fall back to the exception caught on the exchange when none was passed in
        if (e == null) {
            e = exchange.getProperty(Exchange.EXCEPTION_CAUGHT, Exception.class);
        }

        if (exchange.isRollbackOnly()) {
            String msg = "Rollback " + ExchangeHelper.logIds(exchange);
            Throwable cause = exchange.getException() != null ? exchange.getException() : exchange.getProperty(Exchange.EXCEPTION_CAUGHT, Throwable.class);
            if (cause != null) {
                msg = msg + " due: " + cause.getMessage();
            }
            if (newLogLevel == LoggingLevel.ERROR) {
                // log intended rollback on maximum WARN level (no ERROR)
                logger.log(msg, LoggingLevel.WARN);
            } else {
                // otherwise use the desired logging level
                logger.log(msg, newLogLevel);
            }
        } else if (e != null && logStackTrace) {
            logger.log(message, e, newLogLevel);
        } else {
            logger.log(message, newLogLevel);
        }
    }
898
899 /**
900 * Determines whether the exchange is exhausted (or anyway marked to not continue such as rollback).
901 * <p/>
902 * If the exchange is exhausted, then we will not continue processing, but let the
903 * failure processor deal with the exchange.
904 *
905 * @param exchange the current exchange
906 * @param data the redelivery data
907 * @return <tt>false</tt> to continue/redeliver, or <tt>true</tt> to exhaust.
908 */
909 private boolean isExhausted(Exchange exchange, RedeliveryData data) {
910 // if marked as rollback only then do not continue/redeliver
911 boolean exhausted = exchange.getProperty(Exchange.REDELIVERY_EXHAUSTED, false, Boolean.class);
912 if (exhausted) {
913 log.trace("This exchange is marked as redelivery exhausted: {}", exchange);
914 return true;
915 }
916
917 // if marked as rollback only then do not continue/redeliver
918 boolean rollbackOnly = exchange.getProperty(Exchange.ROLLBACK_ONLY, false, Boolean.class);
919 if (rollbackOnly) {
920 log.trace("This exchange is marked as rollback only, so forcing it to be exhausted: {}", exchange);
921 return true;
922 }
923 // its the first original call so continue
924 if (data.redeliveryCounter == 0) {
925 return false;
926 }
927 // its a potential redelivery so determine if we should redeliver or not
928 boolean redeliver = data.currentRedeliveryPolicy.shouldRedeliver(exchange, data.redeliveryCounter, data.retryWhilePredicate);
929 return !redeliver;
930 }
931
932 /**
933 * Determines whether or not to continue if we are exhausted.
934 *
935 * @param exchange the current exchange
936 * @param data the redelivery data
937 * @return <tt>true</tt> to continue, or <tt>false</tt> to exhaust.
938 */
939 private boolean shouldContinue(Exchange exchange, RedeliveryData data) {
940 if (data.continuedPredicate != null) {
941 return data.continuedPredicate.matches(exchange);
942 }
943 // do not continue by default
944 return false;
945 }
946
947 /**
948 * Determines whether or not to handle if we are exhausted.
949 *
950 * @param exchange the current exchange
951 * @param data the redelivery data
952 * @return <tt>true</tt> to handle, or <tt>false</tt> to exhaust.
953 */
954 private boolean shouldHandled(Exchange exchange, RedeliveryData data) {
955 if (data.handledPredicate != null) {
956 return data.handledPredicate.matches(exchange);
957 }
958 // do not handle by default
959 return false;
960 }
961
962 /**
963 * Increments the redelivery counter and adds the redelivered flag if the
964 * message has been redelivered
965 */
966 private int incrementRedeliveryCounter(Exchange exchange, Throwable e, RedeliveryData data) {
967 Message in = exchange.getIn();
968 Integer counter = in.getHeader(Exchange.REDELIVERY_COUNTER, Integer.class);
969 int next = 1;
970 if (counter != null) {
971 next = counter + 1;
972 }
973 in.setHeader(Exchange.REDELIVERY_COUNTER, next);
974 in.setHeader(Exchange.REDELIVERED, Boolean.TRUE);
975 // if maximum redeliveries is used, then provide that information as well
976 if (data.currentRedeliveryPolicy.getMaximumRedeliveries() > 0) {
977 in.setHeader(Exchange.REDELIVERY_MAX_COUNTER, data.currentRedeliveryPolicy.getMaximumRedeliveries());
978 }
979 return next;
980 }
981
982 /**
983 * Prepares the redelivery counter and boolean flag for the failure handle processor
984 */
985 private void decrementRedeliveryCounter(Exchange exchange) {
986 Message in = exchange.getIn();
987 Integer counter = in.getHeader(Exchange.REDELIVERY_COUNTER, Integer.class);
988 if (counter != null) {
989 int prev = counter - 1;
990 in.setHeader(Exchange.REDELIVERY_COUNTER, prev);
991 // set boolean flag according to counter
992 in.setHeader(Exchange.REDELIVERED, prev > 0 ? Boolean.TRUE : Boolean.FALSE);
993 } else {
994 // not redelivered
995 in.setHeader(Exchange.REDELIVERY_COUNTER, 0);
996 in.setHeader(Exchange.REDELIVERED, Boolean.FALSE);
997 }
998 }
999
1000 /**
1001 * Determines if redelivery is enabled by checking if any of the redelivery policy
1002 * settings may allow redeliveries.
1003 *
1004 * @return <tt>true</tt> if redelivery is possible, <tt>false</tt> otherwise
1005 * @throws Exception can be thrown
1006 */
1007 private boolean determineIfRedeliveryIsEnabled() throws Exception {
1008 // determine if redeliver is enabled either on error handler
1009 if (getRedeliveryPolicy().getMaximumRedeliveries() != 0) {
1010 // must check for != 0 as (-1 means redeliver forever)
1011 return true;
1012 }
1013 if (retryWhilePolicy != null) {
1014 return true;
1015 }
1016
1017 // or on the exception policies
1018 if (!exceptionPolicies.isEmpty()) {
1019 // walk them to see if any of them have a maximum redeliveries > 0 or retry until set
1020 for (OnExceptionDefinition def : exceptionPolicies.values()) {
1021
1022 String ref = def.getRedeliveryPolicyRef();
1023 if (ref != null) {
1024 // lookup in registry if ref provided
1025 RedeliveryPolicy policy = CamelContextHelper.mandatoryLookup(camelContext, ref, RedeliveryPolicy.class);
1026 if (policy.getMaximumRedeliveries() != 0) {
1027 // must check for != 0 as (-1 means redeliver forever)
1028 return true;
1029 }
1030 } else if (def.getRedeliveryPolicy() != null) {
1031 Integer max = CamelContextHelper.parseInteger(camelContext, def.getRedeliveryPolicy().getMaximumRedeliveries());
1032 if (max != null && max != 0) {
1033 // must check for != 0 as (-1 means redeliver forever)
1034 return true;
1035 }
1036 }
1037
1038 if (def.getRetryWhilePolicy() != null || def.getRetryWhile() != null) {
1039 return true;
1040 }
1041 }
1042 }
1043
1044 return false;
1045 }
1046
    @Override
    protected void doStart() throws Exception {
        // start the wrapped processors before accepting work
        ServiceHelper.startServices(output, outputAsync, deadLetter);

        if (executorService == null) {
            // use default shared executor service
            executorService = camelContext.getErrorHandlerExecutorService();
        }

        // determine if redeliver is enabled or not; computed once at startup so
        // the hot path does not have to re-evaluate the policies per exchange
        redeliveryEnabled = determineIfRedeliveryIsEnabled();
        if (log.isDebugEnabled()) {
            log.debug("Redelivery enabled: {} on error handler: {}", redeliveryEnabled, this);
        }
    }
1062
    @Override
    protected void doStop() throws Exception {
        // noop, do not stop any services which we only do when shutting down
        // as the error handler can be context scoped, and should not stop in case
        // a route stops; the services are stopped in doShutdown instead
    }
1069
    @Override
    protected void doShutdown() throws Exception {
        // stop and shutdown the wrapped processors (deferred from doStop, see its note)
        ServiceHelper.stopAndShutdownServices(deadLetter, output, outputAsync);
    }
1074 }