001/**
002 * Licensed to the Apache Software Foundation (ASF) under one or more
003 * contributor license agreements.  See the NOTICE file distributed with
004 * this work for additional information regarding copyright ownership.
005 * The ASF licenses this file to You under the Apache License, Version 2.0
006 * (the "License"); you may not use this file except in compliance with
007 * the License.  You may obtain a copy of the License at
008 *
009 *      http://www.apache.org/licenses/LICENSE-2.0
010 *
011 * Unless required by applicable law or agreed to in writing, software
012 * distributed under the License is distributed on an "AS IS" BASIS,
013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
014 * See the License for the specific language governing permissions and
015 * limitations under the License.
016 */
017package org.apache.activemq.store.kahadb;
018
019import static org.apache.activemq.store.kahadb.disk.journal.Location.NOT_SET;
020
021import java.io.ByteArrayInputStream;
022import java.io.ByteArrayOutputStream;
023import java.io.DataInput;
024import java.io.DataOutput;
025import java.io.EOFException;
026import java.io.File;
027import java.io.IOException;
028import java.io.InputStream;
029import java.io.InterruptedIOException;
030import java.io.InvalidClassException;
031import java.io.ObjectInputStream;
032import java.io.ObjectOutputStream;
033import java.io.ObjectStreamClass;
034import java.io.OutputStream;
035import java.util.ArrayList;
036import java.util.Arrays;
037import java.util.Collection;
038import java.util.Collections;
039import java.util.Date;
040import java.util.HashMap;
041import java.util.HashSet;
042import java.util.Iterator;
043import java.util.LinkedHashMap;
044import java.util.LinkedHashSet;
045import java.util.LinkedList;
046import java.util.List;
047import java.util.Map;
048import java.util.Map.Entry;
049import java.util.Set;
050import java.util.SortedSet;
051import java.util.TreeSet;
052import java.util.concurrent.ConcurrentHashMap;
053import java.util.concurrent.ConcurrentMap;
054import java.util.concurrent.Executors;
055import java.util.concurrent.ScheduledExecutorService;
056import java.util.concurrent.ThreadFactory;
057import java.util.concurrent.TimeUnit;
058import java.util.concurrent.atomic.AtomicBoolean;
059import java.util.concurrent.atomic.AtomicLong;
060import java.util.concurrent.atomic.AtomicReference;
061import java.util.concurrent.locks.ReentrantReadWriteLock;
062
063import org.apache.activemq.ActiveMQMessageAuditNoSync;
064import org.apache.activemq.broker.BrokerService;
065import org.apache.activemq.broker.BrokerServiceAware;
066import org.apache.activemq.broker.region.Destination;
067import org.apache.activemq.broker.region.Queue;
068import org.apache.activemq.broker.region.Topic;
069import org.apache.activemq.command.TransactionId;
070import org.apache.activemq.openwire.OpenWireFormat;
071import org.apache.activemq.protobuf.Buffer;
072import org.apache.activemq.store.MessageStore;
073import org.apache.activemq.store.MessageStoreStatistics;
074import org.apache.activemq.store.MessageStoreSubscriptionStatistics;
075import org.apache.activemq.store.PersistenceAdapterStatistics;
076import org.apache.activemq.store.TopicMessageStore;
077import org.apache.activemq.store.kahadb.data.KahaAckMessageFileMapCommand;
078import org.apache.activemq.store.kahadb.data.KahaAddMessageCommand;
079import org.apache.activemq.store.kahadb.data.KahaCommitCommand;
080import org.apache.activemq.store.kahadb.data.KahaDestination;
081import org.apache.activemq.store.kahadb.data.KahaEntryType;
082import org.apache.activemq.store.kahadb.data.KahaPrepareCommand;
083import org.apache.activemq.store.kahadb.data.KahaProducerAuditCommand;
084import org.apache.activemq.store.kahadb.data.KahaRemoveDestinationCommand;
085import org.apache.activemq.store.kahadb.data.KahaRemoveMessageCommand;
086import org.apache.activemq.store.kahadb.data.KahaRewrittenDataFileCommand;
087import org.apache.activemq.store.kahadb.data.KahaRollbackCommand;
088import org.apache.activemq.store.kahadb.data.KahaSubscriptionCommand;
089import org.apache.activemq.store.kahadb.data.KahaTraceCommand;
090import org.apache.activemq.store.kahadb.data.KahaTransactionInfo;
091import org.apache.activemq.store.kahadb.data.KahaUpdateMessageCommand;
092import org.apache.activemq.store.kahadb.disk.index.BTreeIndex;
093import org.apache.activemq.store.kahadb.disk.index.BTreeVisitor;
094import org.apache.activemq.store.kahadb.disk.index.ListIndex;
095import org.apache.activemq.store.kahadb.disk.journal.DataFile;
096import org.apache.activemq.store.kahadb.disk.journal.Journal;
097import org.apache.activemq.store.kahadb.disk.journal.Journal.JournalDiskSyncStrategy;
098import org.apache.activemq.store.kahadb.disk.journal.Location;
099import org.apache.activemq.store.kahadb.disk.journal.TargetedDataFileAppender;
100import org.apache.activemq.store.kahadb.disk.page.Page;
101import org.apache.activemq.store.kahadb.disk.page.PageFile;
102import org.apache.activemq.store.kahadb.disk.page.Transaction;
103import org.apache.activemq.store.kahadb.disk.util.LocationMarshaller;
104import org.apache.activemq.store.kahadb.disk.util.LongMarshaller;
105import org.apache.activemq.store.kahadb.disk.util.Marshaller;
106import org.apache.activemq.store.kahadb.disk.util.Sequence;
107import org.apache.activemq.store.kahadb.disk.util.SequenceSet;
108import org.apache.activemq.store.kahadb.disk.util.StringMarshaller;
109import org.apache.activemq.store.kahadb.disk.util.VariableMarshaller;
110import org.apache.activemq.util.ByteSequence;
111import org.apache.activemq.util.DataByteArrayInputStream;
112import org.apache.activemq.util.DataByteArrayOutputStream;
113import org.apache.activemq.util.IOExceptionSupport;
114import org.apache.activemq.util.IOHelper;
115import org.apache.activemq.util.ServiceStopper;
116import org.apache.activemq.util.ServiceSupport;
117import org.apache.activemq.util.ThreadPoolUtils;
118import org.slf4j.Logger;
119import org.slf4j.LoggerFactory;
120import org.slf4j.MDC;
121
public abstract class MessageDatabase extends ServiceSupport implements BrokerServiceAware {

    protected BrokerService brokerService;

    // System property enabling logging of store accesses slower than the configured millis (0 = disabled).
    public static final String PROPERTY_LOG_SLOW_ACCESS_TIME = "org.apache.activemq.store.kahadb.LOG_SLOW_ACCESS_TIME";
    public static final int LOG_SLOW_ACCESS_TIME = Integer.getInteger(PROPERTY_LOG_SLOW_ACCESS_TIME, 0);
    public static final File DEFAULT_DIRECTORY = new File("KahaDB");
    // Shared empty-buffer constant.
    protected static final Buffer UNMATCHED;
    static {
        UNMATCHED = new Buffer(new byte[]{});
    }
    private static final Logger LOG = LoggerFactory.getLogger(MessageDatabase.class);

    // Persisted in Metadata.state; unload() writes CLOSED_STATE so a clean shutdown can be detected.
    static final int CLOSED_STATE = 1;
    static final int OPEN_STATE = 2;
    static final long NOT_ACKED = -1;

    // Current metadata format version; Metadata.read() tolerates older records (see EOF handling there).
    static final int VERSION = 7;

    // Data-file type marker, offset from the standard log file type.
    static final byte COMPACTED_JOURNAL_FILE = DataFile.STANDARD_LOG_FILE + 1;
142
143    protected class Metadata {
144        protected Page<Metadata> page;
145        protected int state;
146        protected BTreeIndex<String, StoredDestination> destinations;
147        protected Location lastUpdate;
148        protected Location firstInProgressTransactionLocation;
149        protected Location producerSequenceIdTrackerLocation = null;
150        protected Location ackMessageFileMapLocation = null;
151        protected transient ActiveMQMessageAuditNoSync producerSequenceIdTracker = new ActiveMQMessageAuditNoSync();
152        protected transient Map<Integer, Set<Integer>> ackMessageFileMap = new HashMap<>();
153        protected transient AtomicBoolean ackMessageFileMapDirtyFlag = new AtomicBoolean(false);
154        protected int version = VERSION;
155        protected int openwireVersion = OpenWireFormat.DEFAULT_STORE_VERSION;
156
157        public void read(DataInput is) throws IOException {
158            state = is.readInt();
159            destinations = new BTreeIndex<>(pageFile, is.readLong());
160            if (is.readBoolean()) {
161                lastUpdate = LocationMarshaller.INSTANCE.readPayload(is);
162            } else {
163                lastUpdate = null;
164            }
165            if (is.readBoolean()) {
166                firstInProgressTransactionLocation = LocationMarshaller.INSTANCE.readPayload(is);
167            } else {
168                firstInProgressTransactionLocation = null;
169            }
170            try {
171                if (is.readBoolean()) {
172                    producerSequenceIdTrackerLocation = LocationMarshaller.INSTANCE.readPayload(is);
173                } else {
174                    producerSequenceIdTrackerLocation = null;
175                }
176            } catch (EOFException expectedOnUpgrade) {
177            }
178            try {
179                version = is.readInt();
180            } catch (EOFException expectedOnUpgrade) {
181                version = 1;
182            }
183            if (version >= 5 && is.readBoolean()) {
184                ackMessageFileMapLocation = LocationMarshaller.INSTANCE.readPayload(is);
185            } else {
186                ackMessageFileMapLocation = null;
187            }
188            try {
189                openwireVersion = is.readInt();
190            } catch (EOFException expectedOnUpgrade) {
191                openwireVersion = OpenWireFormat.DEFAULT_LEGACY_VERSION;
192            }
193
194            LOG.info("KahaDB is version " + version);
195        }
196
197        public void write(DataOutput os) throws IOException {
198            os.writeInt(state);
199            os.writeLong(destinations.getPageId());
200
201            if (lastUpdate != null) {
202                os.writeBoolean(true);
203                LocationMarshaller.INSTANCE.writePayload(lastUpdate, os);
204            } else {
205                os.writeBoolean(false);
206            }
207
208            if (firstInProgressTransactionLocation != null) {
209                os.writeBoolean(true);
210                LocationMarshaller.INSTANCE.writePayload(firstInProgressTransactionLocation, os);
211            } else {
212                os.writeBoolean(false);
213            }
214
215            if (producerSequenceIdTrackerLocation != null) {
216                os.writeBoolean(true);
217                LocationMarshaller.INSTANCE.writePayload(producerSequenceIdTrackerLocation, os);
218            } else {
219                os.writeBoolean(false);
220            }
221            os.writeInt(VERSION);
222            if (ackMessageFileMapLocation != null) {
223                os.writeBoolean(true);
224                LocationMarshaller.INSTANCE.writePayload(ackMessageFileMapLocation, os);
225            } else {
226                os.writeBoolean(false);
227            }
228            os.writeInt(this.openwireVersion);
229        }
230    }
231
232    class MetadataMarshaller extends VariableMarshaller<Metadata> {
233        @Override
234        public Metadata readPayload(DataInput dataIn) throws IOException {
235            Metadata rc = createMetadata();
236            rc.read(dataIn);
237            return rc;
238        }
239
240        @Override
241        public void writePayload(Metadata object, DataOutput dataOut) throws IOException {
242            object.write(dataOut);
243        }
244    }
245
    /**
     * Controls how recovered prepared XA transactions are handled:
     * left untouched (NEVER), committed, or rolled back.
     */
    public enum PurgeRecoveredXATransactionStrategy {
        NEVER,
        COMMIT,
        ROLLBACK;
    }
251
    protected PageFile pageFile;
    protected Journal journal;
    protected Metadata metadata = new Metadata();
    protected final PersistenceAdapterStatistics persistenceAdapterStatistics = new PersistenceAdapterStatistics();

    protected MetadataMarshaller metadataMarshaller = new MetadataMarshaller();

    protected boolean failIfDatabaseIsLocked;

    protected boolean deleteAllMessages;
    protected File directory = DEFAULT_DIRECTORY;
    protected File indexDirectory = null;
    // Runs CheckpointRunner; created lazily in startCheckpoint() under schedulerLock.
    protected ScheduledExecutorService scheduler;
    private final Object schedulerLock = new Object();

    protected JournalDiskSyncStrategy journalDiskSyncStrategy = JournalDiskSyncStrategy.ALWAYS;
    protected boolean archiveDataLogs;
    protected File directoryArchive;
    protected AtomicLong journalSize = new AtomicLong(0);
    // Intervals below are milliseconds (scheduled with TimeUnit.MILLISECONDS).
    long journalDiskSyncInterval = 1000;
    long checkpointInterval = 5*1000;
    long cleanupInterval = 30*1000;
    boolean cleanupOnStop = true;
    int journalMaxFileLength = Journal.DEFAULT_MAX_FILE_LENGTH;
    int journalMaxWriteBatchSize = Journal.DEFAULT_MAX_WRITE_BATCH_SIZE;
    boolean enableIndexWriteAsync = false;
    int setIndexWriteBatchSize = PageFile.DEFAULT_WRITE_BATCH_SIZE;
    private String preallocationScope = Journal.PreallocationScope.ENTIRE_JOURNAL.name();
    private String preallocationStrategy = Journal.PreallocationStrategy.SPARSE_FILE.name();

    // Flipped by open()/close(); guards against double start/stop.
    protected AtomicBoolean opened = new AtomicBoolean();
    private boolean ignoreMissingJournalfiles = false;
    private int indexCacheSize = 10000;
    private boolean checkForCorruptJournalFiles = false;
    protected PurgeRecoveredXATransactionStrategy purgeRecoveredXATransactionStrategy = PurgeRecoveredXATransactionStrategy.NEVER;
    private boolean checksumJournalFiles = true;
    protected boolean forceRecoverIndex = false;
    private boolean archiveCorruptedIndex = false;
    private boolean useIndexLFRUEviction = false;
    private float indexLFUEvictionFactor = 0.2f;
    private boolean enableIndexDiskSyncs = true;
    private boolean enableIndexRecoveryFile = true;
    private boolean enableIndexPageCaching = true;
    // Write-locked during shutdown checkpoint (see close()).
    ReentrantReadWriteLock checkpointLock = new ReentrantReadWriteLock();

    private boolean enableAckCompaction = true;
    private int compactAcksAfterNoGC = 10;
    private boolean compactAcksIgnoresStoreGrowth = false;
    private int checkPointCyclesWithNoGC;
    private int journalLogOnLastCompactionCheck;
    private boolean enableSubscriptionStatistics = false;

    //only set when using JournalDiskSyncStrategy.PERIODIC
    protected final AtomicReference<Location> lastAsyncJournalUpdate = new AtomicReference<>();
306
    /** Service start hook: loads (and if necessary recovers) the store. */
    @Override
    public void doStart() throws Exception {
        load();
    }
311
    /** Service stop hook: persists a clean-shutdown state and closes the store. */
    @Override
    public void doStop(ServiceStopper stopper) throws Exception {
        unload();
    }
316
317    public void allowIOResumption() {
318        if (pageFile != null) {
319            pageFile.allowIOResumption();
320        }
321        if (journal != null) {
322            journal.allowIOResumption();
323        }
324    }
325
    /**
     * Loads the index page file under the index write lock. On a brand new store
     * the metadata record is allocated on page 0; otherwise it is read from
     * page 0. All stored destinations are then loaded into the in-memory cache.
     * Any failure here propagates so open() can rebuild the index from the journal.
     */
    private void loadPageFile() throws IOException {
        this.indexLock.writeLock().lock();
        try {
            final PageFile pageFile = getPageFile();
            pageFile.load();
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    if (pageFile.getPageCount() == 0) {
                        // First time this is created.. Initialize the metadata
                        Page<Metadata> page = tx.allocate();
                        assert page.getPageId() == 0;
                        page.set(metadata);
                        metadata.page = page;
                        metadata.state = CLOSED_STATE;
                        metadata.destinations = new BTreeIndex<>(pageFile, tx.allocate().getPageId());

                        tx.store(metadata.page, metadataMarshaller, true);
                    } else {
                        // Existing store: the metadata record always lives on page 0.
                        Page<Metadata> page = tx.load(0, metadataMarshaller);
                        metadata = page.get();
                        metadata.page = page;
                    }
                    metadata.destinations.setKeyMarshaller(StringMarshaller.INSTANCE);
                    metadata.destinations.setValueMarshaller(new StoredDestinationMarshaller());
                    metadata.destinations.load(tx);
                }
            });
            // Load up all the destinations since we need to scan all the indexes to figure out which journal files can be deleted.
            // Perhaps we should just keep an index of file
            storedDestinations.clear();
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    for (Iterator<Entry<String, StoredDestination>> iterator = metadata.destinations.iterator(tx); iterator.hasNext();) {
                        Entry<String, StoredDestination> entry = iterator.next();
                        StoredDestination sd = loadStoredDestination(tx, entry.getKey(), entry.getValue().subscriptions!=null);
                        storedDestinations.put(entry.getKey(), sd);

                        if (checkForCorruptJournalFiles) {
                            // sanity check the index also
                            if (!entry.getValue().locationIndex.isEmpty(tx)) {
                                if (entry.getValue().orderIndex.nextMessageId <= 0) {
                                    throw new IOException("Detected uninitialized orderIndex nextMessageId with pending messages for " + entry.getKey());
                                }
                            }
                        }
                    }
                }
            });
            pageFile.flush();
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }
381
382    private void startCheckpoint() {
383        if (checkpointInterval == 0 && cleanupInterval == 0) {
384            LOG.info("periodic checkpoint/cleanup disabled, will occur on clean " + (getCleanupOnStop() ? "shutdown/" : "") + "restart");
385            return;
386        }
387        synchronized (schedulerLock) {
388            if (scheduler == null || scheduler.isShutdown()) {
389                scheduler = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
390
391                    @Override
392                    public Thread newThread(Runnable r) {
393                        Thread schedulerThread = new Thread(r);
394
395                        schedulerThread.setName("ActiveMQ Journal Checkpoint Worker");
396                        schedulerThread.setDaemon(true);
397
398                        return schedulerThread;
399                    }
400                });
401
402                // Short intervals for check-point and cleanups
403                long delay;
404                if (journal.isJournalDiskSyncPeriodic()) {
405                    delay = Math.min(journalDiskSyncInterval > 0 ? journalDiskSyncInterval : checkpointInterval, 500);
406                } else {
407                    delay = Math.min(checkpointInterval > 0 ? checkpointInterval : cleanupInterval, 500);
408                }
409
410                scheduler.scheduleWithFixedDelay(new CheckpointRunner(), 0, delay, TimeUnit.MILLISECONDS);
411            }
412        }
413    }
414
    /**
     * Periodic task installed by startCheckpoint(). Each tick it may: force a
     * journal sync (PERIODIC disk-sync strategy only), run a cleanup, or run a
     * plain checkpoint — whichever interval has elapsed.
     */
    private final class CheckpointRunner implements Runnable {

        private long lastCheckpoint = System.currentTimeMillis();
        private long lastCleanup = System.currentTimeMillis();
        private long lastSync = System.currentTimeMillis();
        // Last journal location already synced; skips redundant sync commands.
        private Location lastAsyncUpdate = null;

        @Override
        public void run() {
            try {
                // Decide on cleanup vs full checkpoint here.
                if (opened.get()) {
                    long now = System.currentTimeMillis();
                    if (journal.isJournalDiskSyncPeriodic() &&
                            journalDiskSyncInterval > 0 && (now - lastSync >= journalDiskSyncInterval)) {
                        Location currentUpdate = lastAsyncJournalUpdate.get();
                        // Only sync when new journal writes landed since the last tick.
                        if (currentUpdate != null && !currentUpdate.equals(lastAsyncUpdate)) {
                            lastAsyncUpdate = currentUpdate;
                            if (LOG.isTraceEnabled()) {
                                LOG.trace("Writing trace command to trigger journal sync");
                            }
                            store(new KahaTraceCommand(), true, null, null);
                        }
                        lastSync = now;
                    }
                    // A cleanup run subsumes a checkpoint, so both timestamps reset together.
                    if (cleanupInterval > 0 && (now - lastCleanup >= cleanupInterval)) {
                        checkpointCleanup(true);
                        lastCleanup = now;
                        lastCheckpoint = now;
                    } else if (checkpointInterval > 0 && (now - lastCheckpoint >= checkpointInterval)) {
                        checkpointCleanup(false);
                        lastCheckpoint = now;
                    }
                }
            } catch (IOException ioe) {
                LOG.error("Checkpoint failed", ioe);
                brokerService.handleIOException(ioe);
            } catch (Throwable e) {
                LOG.error("Checkpoint failed", e);
                brokerService.handleIOException(IOExceptionSupport.create(e));
            }
        }
    }
458
    /**
     * Opens the store once: starts the journal, loads the index, replays the
     * journal via recover(), and starts the checkpoint scheduler. If the index
     * fails to load it is discarded (or archived when archiveCorruptedIndex is
     * set) and rebuilt from the journal.
     */
    public void open() throws IOException {
        if( opened.compareAndSet(false, true) ) {
            getJournal().start();
            try {
                loadPageFile();
            } catch (Throwable t) {
                LOG.warn("Index corrupted. Recovering the index through journal replay. Cause:" + t);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Index load failure", t);
                }
                // try to recover index
                try {
                    pageFile.unload();
                } catch (Exception ignore) {}
                if (archiveCorruptedIndex) {
                    pageFile.archive();
                } else {
                    pageFile.delete();
                }
                metadata = createMetadata();
                //The metadata was recreated after a detect corruption so we need to
                //reconfigure anything that was configured on the old metadata on startup
                configureMetadata();
                pageFile = null;
                loadPageFile();
            }
            recover();
            startCheckpoint();
        }
    }
489
    /**
     * Loads the store under the index write lock. When deleteAllMessages is set,
     * the journal and page file are deleted first (corruption checks are skipped
     * since the data is being purged anyway). A LOADED trace record is stored
     * after a successful open.
     */
    public void load() throws IOException {
        this.indexLock.writeLock().lock();
        try {
            IOHelper.mkdirs(directory);
            if (deleteAllMessages) {
                getJournal().setCheckForCorruptionOnStartup(false);
                getJournal().start();
                getJournal().delete();
                getJournal().close();
                // force open() to recreate the journal from scratch
                journal = null;
                getPageFile().delete();
                LOG.info("Persistence store purged.");
                deleteAllMessages = false;
            }

            open();
            store(new KahaTraceCommand().setMessage("LOADED " + new Date()));
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }
511
    /**
     * Closes the store once: performs a final checkpoint (with cleanup when
     * configured) under the checkpoint write lock, unloads the index, closes
     * the journal, stops the scheduler, and clears cached state.
     */
    public void close() throws IOException, InterruptedException {
        if (opened.compareAndSet(true, false)) {
            checkpointLock.writeLock().lock();
            try {
                if (metadata.page != null) {
                    checkpointUpdate(getCleanupOnStop());
                }
                pageFile.unload();
                metadata = createMetadata();
            } finally {
                checkpointLock.writeLock().unlock();
            }
            journal.close();
            synchronized(schedulerLock) {
                if (scheduler != null) {
                    // -1: wait indefinitely for the checkpoint worker to finish
                    ThreadPoolUtils.shutdownGraceful(scheduler, -1);
                    scheduler = null;
                }
            }
            // clear the cache and journalSize on shutdown of the store
            storeCache.clear();
            journalSize.set(0);
        }
    }
536
    /**
     * Marks the store cleanly closed (CLOSED_STATE plus the earliest in-progress
     * transaction location) in the persisted metadata, then closes the store.
     */
    public void unload() throws IOException, InterruptedException {
        this.indexLock.writeLock().lock();
        try {
            if( pageFile != null && pageFile.isLoaded() ) {
                metadata.state = CLOSED_STATE;
                metadata.firstInProgressTransactionLocation = getInProgressTxLocationRange()[0];

                if (metadata.page != null) {
                    pageFile.tx().execute(new Transaction.Closure<IOException>() {
                        @Override
                        public void execute(Transaction tx) throws IOException {
                            tx.store(metadata.page, metadataMarshaller, true);
                        }
                    });
                }
            }
        } finally {
            this.indexLock.writeLock().unlock();
        }
        close();
    }
558
559    // public for testing
560    @SuppressWarnings("rawtypes")
561    public Location[] getInProgressTxLocationRange() {
562        Location[] range = new Location[]{null, null};
563        synchronized (inflightTransactions) {
564            if (!inflightTransactions.isEmpty()) {
565                for (List<Operation> ops : inflightTransactions.values()) {
566                    if (!ops.isEmpty()) {
567                        trackMaxAndMin(range, ops);
568                    }
569                }
570            }
571            if (!preparedTransactions.isEmpty()) {
572                for (List<Operation> ops : preparedTransactions.values()) {
573                    if (!ops.isEmpty()) {
574                        trackMaxAndMin(range, ops);
575                    }
576                }
577            }
578        }
579        return range;
580    }
581
582    @SuppressWarnings("rawtypes")
583    private void trackMaxAndMin(Location[] range, List<Operation> ops) {
584        Location t = ops.get(0).getLocation();
585        if (range[0] == null || t.compareTo(range[0]) <= 0) {
586            range[0] = t;
587        }
588        t = ops.get(ops.size() -1).getLocation();
589        if (range[1] == null || t.compareTo(range[1]) >= 0) {
590            range[1] = t;
591        }
592    }
593
594    class TranInfo {
595        TransactionId id;
596        Location location;
597
598        class opCount {
599            int add;
600            int remove;
601        }
602        HashMap<KahaDestination, opCount> destinationOpCount = new HashMap<>();
603
604        @SuppressWarnings("rawtypes")
605        public void track(Operation operation) {
606            if (location == null ) {
607                location = operation.getLocation();
608            }
609            KahaDestination destination;
610            boolean isAdd = false;
611            if (operation instanceof AddOperation) {
612                AddOperation add = (AddOperation) operation;
613                destination = add.getCommand().getDestination();
614                isAdd = true;
615            } else {
616                RemoveOperation removeOpperation = (RemoveOperation) operation;
617                destination = removeOpperation.getCommand().getDestination();
618            }
619            opCount opCount = destinationOpCount.get(destination);
620            if (opCount == null) {
621                opCount = new opCount();
622                destinationOpCount.put(destination, opCount);
623            }
624            if (isAdd) {
625                opCount.add++;
626            } else {
627                opCount.remove++;
628            }
629        }
630
631        @Override
632        public String toString() {
633           StringBuffer buffer = new StringBuffer();
634           buffer.append(location).append(";").append(id).append(";\n");
635           for (Entry<KahaDestination, opCount> op : destinationOpCount.entrySet()) {
636               buffer.append(op.getKey()).append('+').append(op.getValue().add).append(',').append('-').append(op.getValue().remove).append(';');
637           }
638           return buffer.toString();
639        }
640    }
641
642    @SuppressWarnings("rawtypes")
643    public String getTransactions() {
644
645        ArrayList<TranInfo> infos = new ArrayList<>();
646        synchronized (inflightTransactions) {
647            if (!inflightTransactions.isEmpty()) {
648                for (Entry<TransactionId, List<Operation>> entry : inflightTransactions.entrySet()) {
649                    TranInfo info = new TranInfo();
650                    info.id = entry.getKey();
651                    for (Operation operation : entry.getValue()) {
652                        info.track(operation);
653                    }
654                    infos.add(info);
655                }
656            }
657        }
658        synchronized (preparedTransactions) {
659            if (!preparedTransactions.isEmpty()) {
660                for (Entry<TransactionId, List<Operation>> entry : preparedTransactions.entrySet()) {
661                    TranInfo info = new TranInfo();
662                    info.id = entry.getKey();
663                    for (Operation operation : entry.getValue()) {
664                        info.track(operation);
665                    }
666                    infos.add(info);
667                }
668            }
669        }
670        return infos.toString();
671    }
672
673    public String getPreparedTransaction(TransactionId transactionId) {
674        String result = "";
675        synchronized (preparedTransactions) {
676            List<Operation> operations = preparedTransactions.get(transactionId);
677            if (operations != null) {
678                TranInfo info = new TranInfo();
679                info.id = transactionId;
680                for (Operation operation : preparedTransactions.get(transactionId)) {
681                    info.track(operation);
682                }
683                result = info.toString();
684            }
685        }
686        return result;
687    }
688
689    /**
690     * Move all the messages that were in the journal into long term storage. We
691     * just replay and do a checkpoint.
692     *
693     * @throws IOException
694     * @throws IOException
695     * @throws IllegalStateException
696     */
697    private void recover() throws IllegalStateException, IOException {
698        this.indexLock.writeLock().lock();
699        try {
700
701            long start = System.currentTimeMillis();
702            boolean requiresJournalReplay = recoverProducerAudit();
703            requiresJournalReplay |= recoverAckMessageFileMap();
704            Location lastIndoubtPosition = getRecoveryPosition();
705            Location recoveryPosition = requiresJournalReplay ? journal.getNextLocation(null) : lastIndoubtPosition;
706            if (recoveryPosition != null) {
707                int redoCounter = 0;
708                int dataFileRotationTracker = recoveryPosition.getDataFileId();
709                LOG.info("Recovering from the journal @" + recoveryPosition);
710                while (recoveryPosition != null) {
711                    try {
712                        JournalCommand<?> message = load(recoveryPosition);
713                        metadata.lastUpdate = recoveryPosition;
714                        process(message, recoveryPosition, lastIndoubtPosition);
715                        redoCounter++;
716                    } catch (IOException failedRecovery) {
717                        if (isIgnoreMissingJournalfiles()) {
718                            LOG.debug("Failed to recover data at position:" + recoveryPosition, failedRecovery);
719                            // track this dud location
720                            journal.corruptRecoveryLocation(recoveryPosition);
721                        } else {
722                            throw new IOException("Failed to recover data at position:" + recoveryPosition, failedRecovery);
723                        }
724                    }
725                    recoveryPosition = journal.getNextLocation(recoveryPosition);
726                    // hold on to the minimum number of open files during recovery
727                    if (recoveryPosition != null && dataFileRotationTracker != recoveryPosition.getDataFileId()) {
728                        dataFileRotationTracker = recoveryPosition.getDataFileId();
729                        journal.cleanup();
730                    }
731                    if (LOG.isInfoEnabled() && redoCounter % 100000 == 0) {
732                        LOG.info("@" + recoveryPosition + ", " + redoCounter + " entries recovered ..");
733                    }
734                }
735                if (LOG.isInfoEnabled()) {
736                    long end = System.currentTimeMillis();
737                    LOG.info("Recovery replayed " + redoCounter + " operations from the journal in " + ((end - start) / 1000.0f) + " seconds.");
738                }
739            }
740
741            // We may have to undo some index updates.
742            pageFile.tx().execute(new Transaction.Closure<IOException>() {
743                @Override
744                public void execute(Transaction tx) throws IOException {
745                    recoverIndex(tx);
746                }
747            });
748
749            // rollback any recovered inflight local transactions, and discard any inflight XA transactions.
750            Set<TransactionId> toRollback = new HashSet<>();
751            Set<TransactionId> toDiscard = new HashSet<>();
752            synchronized (inflightTransactions) {
753                for (Iterator<TransactionId> it = inflightTransactions.keySet().iterator(); it.hasNext(); ) {
754                    TransactionId id = it.next();
755                    if (id.isLocalTransaction()) {
756                        toRollback.add(id);
757                    } else {
758                        toDiscard.add(id);
759                    }
760                }
761                for (TransactionId tx: toRollback) {
762                    if (LOG.isDebugEnabled()) {
763                        LOG.debug("rolling back recovered indoubt local transaction " + tx);
764                    }
765                    store(new KahaRollbackCommand().setTransactionInfo(TransactionIdConversion.convertToLocal(tx)), false, null, null);
766                }
767                for (TransactionId tx: toDiscard) {
768                    if (LOG.isDebugEnabled()) {
769                        LOG.debug("discarding recovered in-flight XA transaction " + tx);
770                    }
771                    inflightTransactions.remove(tx);
772                }
773            }
774
775            synchronized (preparedTransactions) {
776                Set<TransactionId> txIds = new LinkedHashSet<TransactionId>(preparedTransactions.keySet());
777                for (TransactionId txId : txIds) {
778                    switch (purgeRecoveredXATransactionStrategy){
779                        case NEVER:
780                            LOG.warn("Recovered prepared XA TX: [{}]", txId);
781                            break;
782                        case COMMIT:
783                            store(new KahaCommitCommand().setTransactionInfo(TransactionIdConversion.convert(txId)), false, null, null);
784                            LOG.warn("Recovered and Committing prepared XA TX: [{}]", txId);
785                            break;
786                        case ROLLBACK:
787                            store(new KahaRollbackCommand().setTransactionInfo(TransactionIdConversion.convert(txId)), false, null, null);
788                            LOG.warn("Recovered and Rolling Back prepared XA TX: [{}]", txId);
789                            break;
790                    }
791                }
792            }
793
794        } finally {
795            this.indexLock.writeLock().unlock();
796        }
797    }
798
    /**
     * Converts a local {@link TransactionId} into its journaled Kaha form.
     * Currently unreferenced (hence the suppression) but kept as a convenience
     * wrapper around {@link TransactionIdConversion#convertToLocal}.
     */
    @SuppressWarnings("unused")
    private KahaTransactionInfo createLocalTransactionInfo(TransactionId tx) {
        return TransactionIdConversion.convertToLocal(tx);
    }
803
804    private Location minimum(Location x,
805                             Location y) {
806        Location min = null;
807        if (x != null) {
808            min = x;
809            if (y != null) {
810                int compare = y.compareTo(x);
811                if (compare < 0) {
812                    min = y;
813                }
814            }
815        } else {
816            min = y;
817        }
818        return min;
819    }
820
821    private boolean recoverProducerAudit() throws IOException {
822        boolean requiresReplay = true;
823        if (metadata.producerSequenceIdTrackerLocation != null) {
824            try {
825                KahaProducerAuditCommand audit = (KahaProducerAuditCommand) load(metadata.producerSequenceIdTrackerLocation);
826                ObjectInputStream objectIn = new MessageDatabaseObjectInputStream(audit.getAudit().newInput());
827                int maxNumProducers = getMaxFailoverProducersToTrack();
828                int maxAuditDepth = getFailoverProducersAuditDepth();
829                metadata.producerSequenceIdTracker = (ActiveMQMessageAuditNoSync) objectIn.readObject();
830                metadata.producerSequenceIdTracker.setAuditDepth(maxAuditDepth);
831                metadata.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(maxNumProducers);
832                requiresReplay = false;
833            } catch (Exception e) {
834                LOG.warn("Cannot recover message audit", e);
835            }
836        }
837        // got no audit stored so got to recreate via replay from start of the journal
838        return requiresReplay;
839    }
840
841    @SuppressWarnings("unchecked")
842    private boolean recoverAckMessageFileMap() throws IOException {
843        boolean requiresReplay = true;
844        if (metadata.ackMessageFileMapLocation != null) {
845            try {
846                KahaAckMessageFileMapCommand audit = (KahaAckMessageFileMapCommand) load(metadata.ackMessageFileMapLocation);
847                ObjectInputStream objectIn = new MessageDatabaseObjectInputStream(audit.getAckMessageFileMap().newInput());
848                metadata.ackMessageFileMap = (Map<Integer, Set<Integer>>) objectIn.readObject();
849                metadata.ackMessageFileMapDirtyFlag.lazySet(true);
850                requiresReplay = false;
851            } catch (Exception e) {
852                LOG.warn("Cannot recover ackMessageFileMap", e);
853            }
854        }
855        // got no ackMessageFileMap stored so got to recreate via replay from start of the journal
856        return requiresReplay;
857    }
858
    /**
     * Reconciles the index with the journal after a restart. First removes
     * index entries for messages located at or past the journal's last append
     * position (index updates that outran the journal). Then verifies that
     * every data file referenced by the indexes or the ack map still exists
     * and is uncorrupted; affected messages are dropped when
     * {@code ignoreMissingJournalfiles} is set, otherwise recovery fails.
     *
     * @param tx the page-file transaction the index updates run in
     * @throws IOException when missing/corrupt journal files are detected and
     *         {@code ignoreMissingJournalfiles} is false
     */
    protected void recoverIndex(Transaction tx) throws IOException {
        long start = System.currentTimeMillis();
        // It is possible index updates got applied before the journal updates..
        // in that case we need to remove references to messages that are not in the journal
        final Location lastAppendLocation = journal.getLastAppendLocation();
        long undoCounter=0;

        // Go through all the destinations to see if they have messages past the lastAppendLocation
        for (String key : storedDestinations.keySet()) {
            StoredDestination sd = storedDestinations.get(key);

            final ArrayList<Long> matches = new ArrayList<>();
            // Find all the Locations that are >= than the last Append Location.
            sd.locationIndex.visit(tx, new BTreeVisitor.GTEVisitor<Location, Long>(lastAppendLocation) {
                @Override
                protected void matched(Location key, Long value) {
                    matches.add(value);
                }
            });

            // Remove every matched message from all three per-destination indexes
            // and roll its id back out of the producer audit.
            for (Long sequenceId : matches) {
                MessageKeys keys = sd.orderIndex.remove(tx, sequenceId);
                if (keys != null) {
                    sd.locationIndex.remove(tx, keys.location);
                    sd.messageIdIndex.remove(tx, keys.messageId);
                    metadata.producerSequenceIdTracker.rollback(keys.messageId);
                    undoCounter++;
                    decrementAndSubSizeToStoreStat(tx, key, sd, keys.location.getSize());
                    // TODO: do we need to modify the ack positions for the pub sub case?
                }
            }
        }

        if (undoCounter > 0) {
            // The rolledback operations are basically in flight journal writes.  To avoid getting
            // these the end user should do sync writes to the journal.
            if (LOG.isInfoEnabled()) {
                long end = System.currentTimeMillis();
                LOG.info("Rolled back " + undoCounter + " messages from the index in " + ((end - start) / 1000.0f) + " seconds.");
            }
        }

        undoCounter = 0;
        start = System.currentTimeMillis();

        // Lets be extra paranoid here and verify that all the datafiles being referenced
        // by the indexes still exists.

        // Collect the set of data file ids referenced by any location index.
        final SequenceSet ss = new SequenceSet();
        for (StoredDestination sd : storedDestinations.values()) {
            // Use a visitor to cut down the number of pages that we load
            sd.locationIndex.visit(tx, new BTreeVisitor<Location, Long>() {
                int last=-1;

                @Override
                public boolean isInterestedInKeysBetween(Location first, Location second) {
                    // Skip subtrees whose file-id range is already fully recorded.
                    if( first==null ) {
                        return !ss.contains(0, second.getDataFileId());
                    } else if( second==null ) {
                        return true;
                    } else {
                        return !ss.contains(first.getDataFileId(), second.getDataFileId());
                    }
                }

                @Override
                public void visit(List<Location> keys, List<Long> values) {
                    for (Location l : keys) {
                        int fileId = l.getDataFileId();
                        if( last != fileId ) {
                            ss.add(fileId);
                            last = fileId;
                        }
                    }
                }

            });
        }
        // 'missingJournalFiles' first accumulates every referenced file id;
        // the ids of files actually on disk are removed below, leaving the
        // genuinely missing ones.
        HashSet<Integer> missingJournalFiles = new HashSet<>();
        while (!ss.isEmpty()) {
            missingJournalFiles.add((int) ss.removeFirst());
        }

        // Files referenced by the ack map are required too.
        for (Entry<Integer, Set<Integer>> entry : metadata.ackMessageFileMap.entrySet()) {
            missingJournalFiles.add(entry.getKey());
            for (Integer i : entry.getValue()) {
                missingJournalFiles.add(i);
            }
        }

        missingJournalFiles.removeAll(journal.getFileMap().keySet());

        if (!missingJournalFiles.isEmpty()) {
            LOG.warn("Some journal files are missing: " + missingJournalFiles);
        }

        // Build location-range predicates covering every missing file and every
        // corrupted/truncated region of the files that do exist.
        ArrayList<BTreeVisitor.Predicate<Location>> knownCorruption = new ArrayList<>();
        ArrayList<BTreeVisitor.Predicate<Location>> missingPredicates = new ArrayList<>();
        for (Integer missing : missingJournalFiles) {
            missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(missing, 0), new Location(missing + 1, 0)));
        }

        if (checkForCorruptJournalFiles) {
            Collection<DataFile> dataFiles = journal.getFileMap().values();
            for (DataFile dataFile : dataFiles) {
                int id = dataFile.getDataFileId();
                // eof to next file id
                missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(id, dataFile.getLength()), new Location(id + 1, 0)));
                Sequence seq = dataFile.getCorruptedBlocks().getHead();
                while (seq != null) {
                    BTreeVisitor.BetweenVisitor<Location, Long> visitor =
                        new BTreeVisitor.BetweenVisitor<>(new Location(id, (int) seq.getFirst()), new Location(id, (int) seq.getLast() + 1));
                    missingPredicates.add(visitor);
                    knownCorruption.add(visitor);
                    seq = seq.getNext();
                }
            }
        }

        if (!missingPredicates.isEmpty()) {
            for (Entry<String, StoredDestination> sdEntry : storedDestinations.entrySet()) {
                final StoredDestination sd = sdEntry.getValue();
                final LinkedHashMap<Long, Location> matches = new LinkedHashMap<>();
                sd.locationIndex.visit(tx, new BTreeVisitor.OrVisitor<Location, Long>(missingPredicates) {
                    @Override
                    protected void matched(Location key, Long value) {
                        matches.put(value, key);
                    }
                });

                // If some message references are affected by the missing data files...
                if (!matches.isEmpty()) {

                    // We either 'gracefully' recover dropping the missing messages or
                    // we error out.
                    if( ignoreMissingJournalfiles ) {
                        // Update the index to remove the references to the missing data
                        for (Long sequenceId : matches.keySet()) {
                            MessageKeys keys = sd.orderIndex.remove(tx, sequenceId);
                            sd.locationIndex.remove(tx, keys.location);
                            sd.messageIdIndex.remove(tx, keys.messageId);
                            LOG.info("[" + sdEntry.getKey() + "] dropped: " + keys.messageId + " at corrupt location: " + keys.location);
                            undoCounter++;
                            decrementAndSubSizeToStoreStat(tx, sdEntry.getKey(), sdEntry.getValue(), keys.location.getSize());
                            // TODO: do we need to modify the ack positions for the pub sub case?
                        }
                    } else {
                        LOG.error("[" + sdEntry.getKey() + "] references corrupt locations: " + matches);
                        throw new IOException("Detected missing/corrupt journal files referenced by:[" + sdEntry.getKey() + "] " +matches.size()+" messages affected.");
                    }
                }
            }
        }

        if (!ignoreMissingJournalfiles) {
            if (!knownCorruption.isEmpty()) {
                LOG.error("Detected corrupt journal files. " + knownCorruption);
                throw new IOException("Detected corrupt journal files. " + knownCorruption);
            }

            if (!missingJournalFiles.isEmpty()) {
                LOG.error("Detected missing journal files. " + missingJournalFiles);
                throw new IOException("Detected missing journal files. " + missingJournalFiles);
            }
        }

        if (undoCounter > 0) {
            // The rolledback operations are basically in flight journal writes.  To avoid getting these the end user
            // should do sync writes to the journal.
            if (LOG.isInfoEnabled()) {
                long end = System.currentTimeMillis();
                LOG.info("Detected missing/corrupt journal files.  Dropped " + undoCounter + " messages from the index in " + ((end - start) / 1000.0f) + " seconds.");
            }
        }
    }
1034
    // Cursor state for incrementalRecover(): the next journal location to
    // replay, and the last location already processed.
    private Location nextRecoveryPosition;
    private Location lastRecoveryPosition;
1037
    /**
     * Replays any journal entries appended since the previous call (or from
     * the initial recovery position on the first call), updating the index as
     * it goes. Progress is tracked in {@code nextRecoveryPosition} /
     * {@code lastRecoveryPosition} so repeated calls pick up where they left off.
     *
     * @throws IOException on a failure to read or replay a journal record
     */
    public void incrementalRecover() throws IOException {
        this.indexLock.writeLock().lock();
        try {
            if( nextRecoveryPosition == null ) {
                if( lastRecoveryPosition==null ) {
                    // First call: start from the store's normal recovery position.
                    nextRecoveryPosition = getRecoveryPosition();
                } else {
                    nextRecoveryPosition = journal.getNextLocation(lastRecoveryPosition);
                }
            }
            while (nextRecoveryPosition != null) {
                lastRecoveryPosition = nextRecoveryPosition;
                metadata.lastUpdate = lastRecoveryPosition;
                JournalCommand<?> message = load(lastRecoveryPosition);
                // No in-doubt boundary here: every record gets full index processing.
                process(message, lastRecoveryPosition, (IndexAware) null);
                nextRecoveryPosition = journal.getNextLocation(lastRecoveryPosition);
            }
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }
1059
    /**
     * @return the journal location of the most recent update applied to the
     *         index, or null when nothing has been processed yet
     */
    public Location getLastUpdatePosition() throws IOException {
        return metadata.lastUpdate;
    }
1063
1064    private Location getRecoveryPosition() throws IOException {
1065
1066        if (!this.forceRecoverIndex) {
1067
1068            // If we need to recover the transactions..
1069            if (metadata.firstInProgressTransactionLocation != null) {
1070                return metadata.firstInProgressTransactionLocation;
1071            }
1072
1073            // Perhaps there were no transactions...
1074            if( metadata.lastUpdate!=null) {
1075                // Start replay at the record after the last one recorded in the index file.
1076                return getNextInitializedLocation(metadata.lastUpdate);
1077            }
1078        }
1079        // This loads the first position.
1080        return journal.getNextLocation(null);
1081    }
1082
1083    private Location getNextInitializedLocation(Location location) throws IOException {
1084        Location mayNotBeInitialized = journal.getNextLocation(location);
1085        if (location.getSize() == NOT_SET && mayNotBeInitialized != null && mayNotBeInitialized.getSize() != NOT_SET) {
1086            // need to init size and type to skip
1087            return journal.getNextLocation(mayNotBeInitialized);
1088        } else {
1089            return mayNotBeInitialized;
1090        }
1091    }
1092
1093    protected void checkpointCleanup(final boolean cleanup) throws IOException {
1094        long start;
1095        this.indexLock.writeLock().lock();
1096        try {
1097            start = System.currentTimeMillis();
1098            if( !opened.get() ) {
1099                return;
1100            }
1101        } finally {
1102            this.indexLock.writeLock().unlock();
1103        }
1104        checkpointUpdate(cleanup);
1105        long totalTimeMillis = System.currentTimeMillis() - start;
1106        if (LOG_SLOW_ACCESS_TIME > 0 && totalTimeMillis > LOG_SLOW_ACCESS_TIME) {
1107            if (LOG.isInfoEnabled()) {
1108                LOG.info("Slow KahaDB access: cleanup took " + totalTimeMillis);
1109            }
1110            persistenceAdapterStatistics.addSlowCleanupTime(totalTimeMillis);
1111        }
1112    }
1113
1114    public ByteSequence toByteSequence(JournalCommand<?> data) throws IOException {
1115        int size = data.serializedSizeFramed();
1116        DataByteArrayOutputStream os = new DataByteArrayOutputStream(size + 1);
1117        os.writeByte(data.type().getNumber());
1118        data.writeFramed(os);
1119        return os.toByteSequence();
1120    }
1121
1122    // /////////////////////////////////////////////////////////////////
1123    // Methods call by the broker to update and query the store.
1124    // /////////////////////////////////////////////////////////////////
    /** Stores a command asynchronously with no before/after callbacks. */
    public Location store(JournalCommand<?> data) throws IOException {
        return store(data, false, null,null);
    }
1128
    /**
     * Stores a command asynchronously, running {@code onJournalStoreComplete}
     * once the journal write has completed.
     */
    public Location store(JournalCommand<?> data, Runnable onJournalStoreComplete) throws IOException {
        return store(data, false, null, null, onJournalStoreComplete);
    }
1132
    /**
     * Stores a command with optional sync and before/after index callbacks,
     * without a journal-write completion callback.
     */
    public Location store(JournalCommand<?> data, boolean sync, IndexAware before,Runnable after) throws IOException {
        return store(data, sync, before, after, null);
    }
1136
1137    /**
1138     * All updated are are funneled through this method. The updates are converted
1139     * to a JournalMessage which is logged to the journal and then the data from
1140     * the JournalMessage is used to update the index just like it would be done
1141     * during a recovery process.
1142     */
1143    public Location store(JournalCommand<?> data, boolean sync, IndexAware before, Runnable after, Runnable onJournalStoreComplete) throws IOException {
1144        try {
1145            ByteSequence sequence = toByteSequence(data);
1146            Location location;
1147
1148            checkpointLock.readLock().lock();
1149            try {
1150
1151                long start = System.currentTimeMillis();
1152                location = onJournalStoreComplete == null ? journal.write(sequence, sync) : journal.write(sequence, onJournalStoreComplete) ;
1153                long start2 = System.currentTimeMillis();
1154                //Track the last async update so we know if we need to sync at the next checkpoint
1155                if (!sync && journal.isJournalDiskSyncPeriodic()) {
1156                    lastAsyncJournalUpdate.set(location);
1157                }
1158                process(data, location, before);
1159
1160                long end = System.currentTimeMillis();
1161                long totalTimeMillis = end - start;
1162                if (LOG_SLOW_ACCESS_TIME > 0 && totalTimeMillis > LOG_SLOW_ACCESS_TIME) {
1163                    if (LOG.isInfoEnabled()) {
1164                        LOG.info("Slow KahaDB access: Journal append took: "+(start2-start)+" ms, Index update took "+(end-start2)+" ms");
1165                    }
1166                    persistenceAdapterStatistics.addSlowWriteTime(totalTimeMillis);
1167                }
1168
1169                persistenceAdapterStatistics.addWriteTime(totalTimeMillis);
1170
1171            } finally {
1172                checkpointLock.readLock().unlock();
1173            }
1174
1175            if (after != null) {
1176                after.run();
1177            }
1178
1179            return location;
1180        } catch (IOException ioe) {
1181            LOG.error("KahaDB failed to store to Journal, command of type: " + data.type(), ioe);
1182            brokerService.handleIOException(ioe);
1183            throw ioe;
1184        }
1185    }
1186
1187    /**
1188     * Loads a previously stored JournalMessage
1189     *
1190     * @param location
1191     * @return
1192     * @throws IOException
1193     */
1194    public JournalCommand<?> load(Location location) throws IOException {
1195        long start = System.currentTimeMillis();
1196        ByteSequence data = journal.read(location);
1197        long totalTimeMillis = System.currentTimeMillis() - start;
1198        if( LOG_SLOW_ACCESS_TIME>0 && totalTimeMillis > LOG_SLOW_ACCESS_TIME) {
1199            if (LOG.isInfoEnabled()) {
1200                LOG.info("Slow KahaDB access: Journal read took: "+ totalTimeMillis +" ms");
1201            }
1202            persistenceAdapterStatistics.addSlowReadTime(totalTimeMillis);
1203        }
1204
1205        persistenceAdapterStatistics.addReadTime(totalTimeMillis);
1206
1207        DataByteArrayInputStream is = new DataByteArrayInputStream(data);
1208        byte readByte = is.readByte();
1209        KahaEntryType type = KahaEntryType.valueOf(readByte);
1210        if( type == null ) {
1211            try {
1212                is.close();
1213            } catch (IOException e) {}
1214            throw new IOException("Could not load journal record, null type information from: " + readByte + " at location: "+location);
1215        }
1216        JournalCommand<?> message = (JournalCommand<?>)type.createMessage();
1217        message.mergeFramed(is);
1218        return message;
1219    }
1220
1221    /**
1222     * do minimal recovery till we reach the last inDoubtLocation
1223     * @param data
1224     * @param location
1225     * @param inDoubtlocation
1226     * @throws IOException
1227     */
1228    void process(JournalCommand<?> data, final Location location, final Location inDoubtlocation) throws IOException {
1229        if (inDoubtlocation != null && location.compareTo(inDoubtlocation) >= 0) {
1230            initMessageStore(data);
1231            process(data, location, (IndexAware) null);
1232        } else {
1233            // just recover producer audit
1234            data.visit(new Visitor() {
1235                @Override
1236                public void visit(KahaAddMessageCommand command) throws IOException {
1237                    metadata.producerSequenceIdTracker.isDuplicate(command.getMessageId());
1238                }
1239            });
1240        }
1241    }
1242
1243    private void initMessageStore(JournalCommand<?> data) throws IOException {
1244        data.visit(new Visitor() {
1245            @Override
1246            public void visit(KahaAddMessageCommand command) throws IOException {
1247                final KahaDestination destination = command.getDestination();
1248                if (!storedDestinations.containsKey(key(destination))) {
1249                    pageFile.tx().execute(new Transaction.Closure<IOException>() {
1250                        @Override
1251                        public void execute(Transaction tx) throws IOException {
1252                            getStoredDestination(destination, tx);
1253                        }
1254                    });
1255                }
1256            }
1257        });
1258    }
1259
1260    // /////////////////////////////////////////////////////////////////
1261    // Journaled record processing methods. Once the record is journaled,
1262    // these methods handle applying the index updates. These may be called
1263    // from the recovery method too so they need to be idempotent
1264    // /////////////////////////////////////////////////////////////////
1265
    /**
     * Dispatches a journal command to the matching index-update handler via
     * the command visitor. Called both on the live store path and during
     * recovery, so every handler must be idempotent. The
     * {@code onSequenceAssignedCallback} is only meaningful for add and
     * commit commands, which assign order-index sequences.
     */
    void process(JournalCommand<?> data, final Location location, final IndexAware onSequenceAssignedCallback) throws IOException {
        data.visit(new Visitor() {
            @Override
            public void visit(KahaAddMessageCommand command) throws IOException {
                process(command, location, onSequenceAssignedCallback);
            }

            @Override
            public void visit(KahaRemoveMessageCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaPrepareCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaCommitCommand command) throws IOException {
                process(command, location, onSequenceAssignedCallback);
            }

            @Override
            public void visit(KahaRollbackCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaRemoveDestinationCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaSubscriptionCommand command) throws IOException {
                process(command, location);
            }

            // The following command types carry no index payload; they only
            // advance the last-update marker.
            @Override
            public void visit(KahaProducerAuditCommand command) throws IOException {
                processLocation(location);
            }

            @Override
            public void visit(KahaAckMessageFileMapCommand command) throws IOException {
                processLocation(location);
            }

            @Override
            public void visit(KahaTraceCommand command) {
                processLocation(location);
            }

            @Override
            public void visit(KahaUpdateMessageCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaRewrittenDataFileCommand command) throws IOException {
                process(command, location);
            }
        });
    }
1329
    /**
     * Handles an add-message command. A transactional add is parked on the
     * in-flight transaction's operation list until commit; otherwise the index
     * is updated immediately under the index write lock, and
     * {@code runWithIndexLock} (if any) is told the assigned order-index
     * sequence while the lock is still held.
     */
    @SuppressWarnings("rawtypes")
    protected void process(final KahaAddMessageCommand command, final Location location, final IndexAware runWithIndexLock) throws IOException {
        if (command.hasTransactionInfo()) {
            List<Operation> inflightTx = getInflightTx(command.getTransactionInfo());
            inflightTx.add(new AddOperation(command, location, runWithIndexLock));
        } else {
            this.indexLock.writeLock().lock();
            try {
                pageFile.tx().execute(new Transaction.Closure<IOException>() {
                    @Override
                    public void execute(Transaction tx) throws IOException {
                        long assignedIndex = updateIndex(tx, command, location);
                        if (runWithIndexLock != null) {
                            runWithIndexLock.sequenceAssignedWithIndexLocked(assignedIndex);
                        }
                    }
                });

            } finally {
                this.indexLock.writeLock().unlock();
            }
        }
    }
1353
1354    protected void process(final KahaUpdateMessageCommand command, final Location location) throws IOException {
1355        this.indexLock.writeLock().lock();
1356        try {
1357            pageFile.tx().execute(new Transaction.Closure<IOException>() {
1358                @Override
1359                public void execute(Transaction tx) throws IOException {
1360                    updateIndex(tx, command, location);
1361                }
1362            });
1363        } finally {
1364            this.indexLock.writeLock().unlock();
1365        }
1366    }
1367
1368    @SuppressWarnings("rawtypes")
1369    protected void process(final KahaRemoveMessageCommand command, final Location location) throws IOException {
1370        if (command.hasTransactionInfo()) {
1371           List<Operation> inflightTx = getInflightTx(command.getTransactionInfo());
1372           inflightTx.add(new RemoveOperation(command, location));
1373        } else {
1374            this.indexLock.writeLock().lock();
1375            try {
1376                pageFile.tx().execute(new Transaction.Closure<IOException>() {
1377                    @Override
1378                    public void execute(Transaction tx) throws IOException {
1379                        updateIndex(tx, command, location);
1380                    }
1381                });
1382            } finally {
1383                this.indexLock.writeLock().unlock();
1384            }
1385        }
1386    }
1387
1388    protected void process(final KahaRemoveDestinationCommand command, final Location location) throws IOException {
1389        this.indexLock.writeLock().lock();
1390        try {
1391            pageFile.tx().execute(new Transaction.Closure<IOException>() {
1392                @Override
1393                public void execute(Transaction tx) throws IOException {
1394                    updateIndex(tx, command, location);
1395                }
1396            });
1397        } finally {
1398            this.indexLock.writeLock().unlock();
1399        }
1400    }
1401
1402    protected void process(final KahaSubscriptionCommand command, final Location location) throws IOException {
1403        this.indexLock.writeLock().lock();
1404        try {
1405            pageFile.tx().execute(new Transaction.Closure<IOException>() {
1406                @Override
1407                public void execute(Transaction tx) throws IOException {
1408                    updateIndex(tx, command, location);
1409                }
1410            });
1411        } finally {
1412            this.indexLock.writeLock().unlock();
1413        }
1414    }
1415
1416    protected void processLocation(final Location location) {
1417        this.indexLock.writeLock().lock();
1418        try {
1419            metadata.lastUpdate = location;
1420        } finally {
1421            this.indexLock.writeLock().unlock();
1422        }
1423    }
1424
1425    @SuppressWarnings("rawtypes")
1426    protected void process(KahaCommitCommand command, final Location location, final IndexAware before) throws IOException {
1427        TransactionId key = TransactionIdConversion.convert(command.getTransactionInfo());
1428        List<Operation> inflightTx;
1429        synchronized (inflightTransactions) {
1430            inflightTx = inflightTransactions.remove(key);
1431            if (inflightTx == null) {
1432                inflightTx = preparedTransactions.remove(key);
1433            }
1434        }
1435        if (inflightTx == null) {
1436            // only non persistent messages in this tx
1437            if (before != null) {
1438                before.sequenceAssignedWithIndexLocked(-1);
1439            }
1440            // Moving the checkpoint pointer as there is no persistent operations in this transaction to be replayed
1441            processLocation(location);
1442            return;
1443        }
1444
1445        final List<Operation> messagingTx = inflightTx;
1446        indexLock.writeLock().lock();
1447        try {
1448            pageFile.tx().execute(new Transaction.Closure<IOException>() {
1449                @Override
1450                public void execute(Transaction tx) throws IOException {
1451                    for (Operation op : messagingTx) {
1452                        op.execute(tx);
1453                        recordAckMessageReferenceLocation(location, op.getLocation());
1454                    }
1455                }
1456            });
1457            metadata.lastUpdate = location;
1458        } finally {
1459            indexLock.writeLock().unlock();
1460        }
1461    }
1462
1463    @SuppressWarnings("rawtypes")
1464    protected void process(KahaPrepareCommand command, Location location) {
1465        TransactionId key = TransactionIdConversion.convert(command.getTransactionInfo());
1466        List<Operation> tx = null;
1467        synchronized (inflightTransactions) {
1468            tx = inflightTransactions.remove(key);
1469            if (tx != null) {
1470                preparedTransactions.put(key, tx);
1471            }
1472        }
1473        if (tx != null && !tx.isEmpty()) {
1474            indexLock.writeLock().lock();
1475            try {
1476                for (Operation op : tx) {
1477                    recordAckMessageReferenceLocation(location, op.getLocation());
1478                }
1479            } finally {
1480                indexLock.writeLock().unlock();
1481            }
1482        }
1483    }
1484
1485    @SuppressWarnings("rawtypes")
1486    protected void process(KahaRollbackCommand command, Location location)  throws IOException {
1487        TransactionId key = TransactionIdConversion.convert(command.getTransactionInfo());
1488        List<Operation> updates = null;
1489        synchronized (inflightTransactions) {
1490            updates = inflightTransactions.remove(key);
1491            if (updates == null) {
1492                updates = preparedTransactions.remove(key);
1493            }
1494        }
1495        if (key.isXATransaction() && updates != null && !updates.isEmpty()) {
1496            indexLock.writeLock().lock();
1497            try {
1498                for (Operation op : updates) {
1499                    recordAckMessageReferenceLocation(location, op.getLocation());
1500                }
1501            } finally {
1502                indexLock.writeLock().unlock();
1503            }
1504        }
1505    }
1506
1507    protected void process(KahaRewrittenDataFileCommand command, Location location)  throws IOException {
1508        final TreeSet<Integer> completeFileSet = new TreeSet<>(journal.getFileMap().keySet());
1509
1510        // Mark the current journal file as a compacted file so that gc checks can skip
1511        // over logs that are smaller compaction type logs.
1512        DataFile current = journal.getDataFileById(location.getDataFileId());
1513        current.setTypeCode(command.getRewriteType());
1514
1515        if (completeFileSet.contains(command.getSourceDataFileId()) && command.getSkipIfSourceExists()) {
1516            // Move offset so that next location read jumps to next file.
1517            location.setOffset(journalMaxFileLength);
1518        }
1519    }
1520
1521    // /////////////////////////////////////////////////////////////////
1522    // These methods do the actual index updates.
1523    // /////////////////////////////////////////////////////////////////
1524
1525    protected final ReentrantReadWriteLock indexLock = new ReentrantReadWriteLock();
1526    private final HashSet<Integer> journalFilesBeingReplicated = new HashSet<>();
1527
    /**
     * Adds a journaled message to the destination's indexes (location, message
     * id and order index), handling duplicates and journal-replay redos.
     *
     * @param tx the active page file transaction
     * @param command the add command replayed from the journal
     * @param location the journal location of the message payload
     * @return the sequence id assigned by the order index, or -1 when no new
     *         index entry was created (destination gone, topic with no
     *         subscriptions, or duplicate message id)
     * @throws IOException on index update failure
     */
    long updateIndex(Transaction tx, KahaAddMessageCommand command, Location location) throws IOException {
        StoredDestination sd = getExistingStoredDestination(command.getDestination(), tx);
        if (sd == null) {
            // if the store no longer exists, skip
            return -1;
        }
        // Skip adding the message to the index if this is a topic and there are
        // no subscriptions.
        if (sd.subscriptions != null && sd.subscriptions.isEmpty(tx)) {
            return -1;
        }

        // Add the message.
        int priority = command.getPrioritySupported() ? command.getPriority() : javax.jms.Message.DEFAULT_PRIORITY;
        long id = sd.orderIndex.getNextMessageId();
        Long previous = sd.locationIndex.put(tx, location, id);
        if (previous == null) {
            previous = sd.messageIdIndex.put(tx, command.getMessageId(), id);
            if (previous == null) {
                // First time we see both this location and this message id:
                // index the message and register it with any topic subscriptions.
                incrementAndAddSizeToStoreStat(tx, command.getDestination(), location.getSize());
                sd.orderIndex.put(tx, priority, id, new MessageKeys(command.getMessageId(), location));
                if (sd.subscriptions != null && !sd.subscriptions.isEmpty(tx)) {
                    addAckLocationForNewMessage(tx, command.getDestination(), sd, id);
                }
                metadata.lastUpdate = location;
            } else {
                // The message id was already mapped to another sequence: this
                // is a duplicate add. Restore the prior id mapping and undo the
                // location entry created above.
                MessageKeys messageKeys = sd.orderIndex.get(tx, previous);
                if (messageKeys != null && messageKeys.location.compareTo(location) < 0) {
                    // If the message ID is indexed, then the broker asked us to store a duplicate before the message was dispatched and acked, we ignore this add attempt
                    LOG.warn("Duplicate message add attempt rejected. Destination: {}://{}, Message id: {}", command.getDestination().getType(), command.getDestination().getName(), command.getMessageId());
                }
                sd.messageIdIndex.put(tx, command.getMessageId(), previous);
                sd.locationIndex.remove(tx, location);
                id = -1;
            }
        } else {
            // restore the previous value.. Looks like this was a redo of a previously
            // added message. We don't want to assign it a new id as the other indexes would
            // be wrong..
            sd.locationIndex.put(tx, location, previous);
            // ensure sequence is not broken
            sd.orderIndex.revertNextMessageId();
            metadata.lastUpdate = location;
        }
        // record this id in any event, initial send or recovery
        metadata.producerSequenceIdTracker.isDuplicate(command.getMessageId());

       return id;
    }
1578
1579    void trackPendingAdd(KahaDestination destination, Long seq) {
1580        StoredDestination sd = storedDestinations.get(key(destination));
1581        if (sd != null) {
1582            sd.trackPendingAdd(seq);
1583        }
1584    }
1585
1586    void trackPendingAddComplete(KahaDestination destination, Long seq) {
1587        StoredDestination sd = storedDestinations.get(key(destination));
1588        if (sd != null) {
1589            sd.trackPendingAddComplete(seq);
1590        }
1591    }
1592
    /**
     * Applies a message update: replaces the indexed message payload location
     * for an existing message id, adjusting store (and, when enabled,
     * per-subscription) size statistics. Falls back to a plain add when the
     * message id is not yet indexed.
     *
     * @param tx the active page file transaction
     * @param updateMessageCommand the update command replayed from the journal
     * @param location the journal location of the updated message payload
     * @throws IOException on index update failure
     */
    void updateIndex(Transaction tx, KahaUpdateMessageCommand updateMessageCommand, Location location) throws IOException {
        KahaAddMessageCommand command = updateMessageCommand.getMessage();
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);

        Long id = sd.messageIdIndex.get(tx, command.getMessageId());
        if (id != null) {
            // Re-point the order index entry at the new payload location.
            MessageKeys previousKeys = sd.orderIndex.put(
                    tx,
                    command.getPrioritySupported() ? command.getPriority() : javax.jms.Message.DEFAULT_PRIORITY,
                    id,
                    new MessageKeys(command.getMessageId(), location)
            );
            sd.locationIndex.put(tx, location, id);
            incrementAndAddSizeToStoreStat(tx, command.getDestination(), location.getSize());

            if (previousKeys != null) {
                //Remove the existing from the size
                decrementAndSubSizeToStoreStat(tx, command.getDestination(), previousKeys.location.getSize());

                //update all the subscription metrics
                if (enableSubscriptionStatistics && sd.ackPositions != null && location.getSize() != previousKeys.location.getSize()) {
                    Iterator<Entry<String, SequenceSet>> iter = sd.ackPositions.iterator(tx);
                    while (iter.hasNext()) {
                        Entry<String, SequenceSet> e = iter.next();
                        if (e.getValue().contains(id)) {
                            // Only subscriptions still holding this message adjust their sizes.
                            incrementAndAddSizeToStoreStat(key(command.getDestination()), e.getKey(), location.getSize());
                            decrementAndSubSizeToStoreStat(key(command.getDestination()), e.getKey(), previousKeys.location.getSize());
                        }
                    }
                }

                // on first update previous is original location, on recovery/replay it may be the updated location
                if(!previousKeys.location.equals(location)) {
                    sd.locationIndex.remove(tx, previousKeys.location);
                }
            }
            metadata.lastUpdate = location;
        } else {
            //Add the message if it can't be found
            this.updateIndex(tx, command, location);
        }
    }
1635
    /**
     * Applies a message removal (ack). For queues the message is removed from
     * all three indexes directly; for topics the subscription's last-ack is
     * advanced and the message is only deleted once no subscription still
     * references it (handled by removeAckLocation).
     *
     * @param tx the active page file transaction
     * @param command the remove command replayed from the journal
     * @param ackLocation the journal location of the ack record
     * @throws IOException on index update failure
     */
    void updateIndex(Transaction tx, KahaRemoveMessageCommand command, Location ackLocation) throws IOException {
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
        if (!command.hasSubscriptionKey()) {

            // In the queue case we just remove the message from the index..
            Long sequenceId = sd.messageIdIndex.remove(tx, command.getMessageId());
            if (sequenceId != null) {
                MessageKeys keys = sd.orderIndex.remove(tx, sequenceId);
                if (keys != null) {
                    sd.locationIndex.remove(tx, keys.location);
                    decrementAndSubSizeToStoreStat(tx, command.getDestination(), keys.location.getSize());
                    // The ack's journal file now references the message's file for GC purposes.
                    recordAckMessageReferenceLocation(ackLocation, keys.location);
                    metadata.lastUpdate = ackLocation;
                }  else if (LOG.isDebugEnabled()) {
                    LOG.debug("message not found in order index: " + sequenceId  + " for: " + command.getMessageId());
                }
            } else if (LOG.isDebugEnabled()) {
                LOG.debug("message not found in sequence id index: " + command.getMessageId());
            }
        } else {
            // In the topic case we need remove the message once it's been acked
            // by all the subs
            Long sequence = sd.messageIdIndex.get(tx, command.getMessageId());

            // Make sure it's a valid message id...
            if (sequence != null) {
                String subscriptionKey = command.getSubscriptionKey();
                if (command.getAck() != UNMATCHED) {
                    // Advance this subscription's last-ack cursor (with priority).
                    sd.orderIndex.get(tx, sequence);
                    byte priority = sd.orderIndex.lastGetPriority();
                    sd.subscriptionAcks.put(tx, subscriptionKey, new LastAck(sequence, priority));
                }

                MessageKeys keys = sd.orderIndex.get(tx, sequence);
                if (keys != null) {
                    recordAckMessageReferenceLocation(ackLocation, keys.location);
                }
                // The following method handles deleting un-referenced messages.
                removeAckLocation(command, tx, sd, subscriptionKey, sequence);
                metadata.lastUpdate = ackLocation;
            } else if (LOG.isDebugEnabled()) {
                LOG.debug("on ack, no message sequence exists for id: " + command.getMessageId() + " and sub: " + command.getSubscriptionKey());
            }

        }
    }
1682
1683    private void recordAckMessageReferenceLocation(Location ackLocation, Location messageLocation) {
1684        Set<Integer> referenceFileIds = metadata.ackMessageFileMap.get(Integer.valueOf(ackLocation.getDataFileId()));
1685        if (referenceFileIds == null) {
1686            referenceFileIds = new HashSet<>();
1687            referenceFileIds.add(messageLocation.getDataFileId());
1688            metadata.ackMessageFileMap.put(ackLocation.getDataFileId(), referenceFileIds);
1689            metadata.ackMessageFileMapDirtyFlag.lazySet(true);
1690
1691        } else {
1692            Integer id = Integer.valueOf(messageLocation.getDataFileId());
1693            if (!referenceFileIds.contains(id)) {
1694                referenceFileIds.add(id);
1695            }
1696        }
1697    }
1698
    /**
     * Deletes a destination: clears, unloads and frees every index owned by
     * its StoredDestination (including subscription indexes for topics), then
     * removes the destination from the in-memory and on-disk catalogs and
     * drops its cached statistics.
     *
     * @param tx the active page file transaction
     * @param command the destination removal command replayed from the journal
     * @param location the journal location of the command (unused here)
     * @throws IOException on index update failure
     */
    void updateIndex(Transaction tx, KahaRemoveDestinationCommand command, Location location) throws IOException {
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
        sd.orderIndex.remove(tx);

        // Each index is cleared, unloaded, then its root page freed, in order.
        sd.locationIndex.clear(tx);
        sd.locationIndex.unload(tx);
        tx.free(sd.locationIndex.getPageId());

        sd.messageIdIndex.clear(tx);
        sd.messageIdIndex.unload(tx);
        tx.free(sd.messageIdIndex.getPageId());

        tx.free(sd.messageStoreStatistics.getPageId());
        sd.messageStoreStatistics = null;

        // Topics additionally carry subscription state to tear down.
        if (sd.subscriptions != null) {
            sd.subscriptions.clear(tx);
            sd.subscriptions.unload(tx);
            tx.free(sd.subscriptions.getPageId());

            sd.subscriptionAcks.clear(tx);
            sd.subscriptionAcks.unload(tx);
            tx.free(sd.subscriptionAcks.getPageId());

            sd.ackPositions.clear(tx);
            sd.ackPositions.unload(tx);
            tx.free(sd.ackPositions.getHeadPageId());

            sd.subLocations.clear(tx);
            sd.subLocations.unload(tx);
            tx.free(sd.subLocations.getHeadPageId());
        }

        // Drop the destination from both the in-memory and persisted catalogs.
        String key = key(command.getDestination());
        storedDestinations.remove(key);
        metadata.destinations.remove(tx, key);
        clearStoreStats(command.getDestination());
        storeCache.remove(key(command.getDestination()));
    }
1738
    /**
     * Applies a subscription command: creates (or re-records) a durable
     * subscription when the command carries subscription info, otherwise
     * deletes the subscription. When the last subscription of a destination is
     * removed the whole stored destination is deleted.
     *
     * @param tx the active page file transaction
     * @param command the subscription command replayed from the journal
     * @param location the journal location of the subscription record
     * @throws IOException on index update failure
     */
    void updateIndex(Transaction tx, KahaSubscriptionCommand command, Location location) throws IOException {
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
        final String subscriptionKey = command.getSubscriptionKey();

        // If set then we are creating it.. otherwise we are destroying the sub
        if (command.hasSubscriptionInfo()) {
            Location existing = sd.subLocations.get(tx, subscriptionKey);
            if (existing != null && existing.compareTo(location) == 0) {
                // replay on recovery, ignore
                LOG.trace("ignoring journal replay of replay of sub from: " + location);
                return;
            }

            sd.subscriptions.put(tx, subscriptionKey, command);
            sd.subLocations.put(tx, subscriptionKey, location);
            long ackLocation=NOT_ACKED;
            if (!command.getRetroactive()) {
                // Non-retroactive subs start acked up to the current sequence.
                ackLocation = sd.orderIndex.nextMessageId-1;
            } else {
                // Retroactive subs get ack positions for existing messages.
                addAckLocationForRetroactiveSub(tx, sd, subscriptionKey);
            }
            sd.subscriptionAcks.put(tx, subscriptionKey, new LastAck(ackLocation));
            sd.subscriptionCache.add(subscriptionKey);
        } else {
            // delete the sub...
            sd.subscriptions.remove(tx, subscriptionKey);
            sd.subLocations.remove(tx, subscriptionKey);
            sd.subscriptionAcks.remove(tx, subscriptionKey);
            sd.subscriptionCache.remove(subscriptionKey);
            removeAckLocationsForSub(command, tx, sd, subscriptionKey);
            MessageStoreSubscriptionStatistics subStats = getSubStats(key(command.getDestination()));
            if (subStats != null) {
                subStats.removeSubscription(subscriptionKey);
            }

            if (sd.subscriptions.isEmpty(tx)) {
                // remove the stored destination
                KahaRemoveDestinationCommand removeDestinationCommand = new KahaRemoveDestinationCommand();
                removeDestinationCommand.setDestination(command.getDestination());
                updateIndex(tx, removeDestinationCommand, null);
                clearStoreStats(command.getDestination());
            }
        }
    }
1783
1784    private void checkpointUpdate(final boolean cleanup) throws IOException {
1785        checkpointLock.writeLock().lock();
1786        try {
1787            this.indexLock.writeLock().lock();
1788            try {
1789                Set<Integer> filesToGc = pageFile.tx().execute(new Transaction.CallableClosure<Set<Integer>, IOException>() {
1790                    @Override
1791                    public Set<Integer> execute(Transaction tx) throws IOException {
1792                        return checkpointUpdate(tx, cleanup);
1793                    }
1794                });
1795                pageFile.flush();
1796                // after the index update such that partial removal does not leave dangling references in the index.
1797                journal.removeDataFiles(filesToGc);
1798            } finally {
1799                this.indexLock.writeLock().unlock();
1800            }
1801
1802        } finally {
1803            checkpointLock.writeLock().unlock();
1804        }
1805    }
1806
1807    /**
1808     * @param tx
1809     * @throws IOException
1810     */
1811    Set<Integer> checkpointUpdate(Transaction tx, boolean cleanup) throws IOException {
1812        MDC.put("activemq.persistenceDir", getDirectory().getName());
1813        LOG.debug("Checkpoint started.");
1814
1815        // reflect last update exclusive of current checkpoint
1816        Location lastUpdate = metadata.lastUpdate;
1817
1818        metadata.state = OPEN_STATE;
1819        metadata.producerSequenceIdTrackerLocation = checkpointProducerAudit();
1820        if (metadata.ackMessageFileMapDirtyFlag.get() || (metadata.ackMessageFileMapLocation == null)) {
1821            metadata.ackMessageFileMapLocation = checkpointAckMessageFileMap();
1822        }
1823        metadata.ackMessageFileMapDirtyFlag.lazySet(false);
1824        Location[] inProgressTxRange = getInProgressTxLocationRange();
1825        metadata.firstInProgressTransactionLocation = inProgressTxRange[0];
1826        tx.store(metadata.page, metadataMarshaller, true);
1827
1828        final TreeSet<Integer> gcCandidateSet = new TreeSet<>();
1829        if (cleanup) {
1830
1831            final TreeSet<Integer> completeFileSet = new TreeSet<>(journal.getFileMap().keySet());
1832            gcCandidateSet.addAll(completeFileSet);
1833
1834            if (LOG.isTraceEnabled()) {
1835                LOG.trace("Last update: " + lastUpdate + ", full gc candidates set: " + gcCandidateSet);
1836            }
1837
1838            if (lastUpdate != null) {
1839                // we won't delete past the last update, ackCompaction journal can be a candidate in error
1840                gcCandidateSet.removeAll(new TreeSet<Integer>(gcCandidateSet.tailSet(lastUpdate.getDataFileId())));
1841            }
1842
1843            // Don't GC files under replication
1844            if( journalFilesBeingReplicated!=null ) {
1845                gcCandidateSet.removeAll(journalFilesBeingReplicated);
1846            }
1847
1848            if (metadata.producerSequenceIdTrackerLocation != null) {
1849                int dataFileId = metadata.producerSequenceIdTrackerLocation.getDataFileId();
1850                if (gcCandidateSet.contains(dataFileId) && gcCandidateSet.first() == dataFileId) {
1851                    // rewrite so we don't prevent gc
1852                    metadata.producerSequenceIdTracker.setModified(true);
1853                    if (LOG.isTraceEnabled()) {
1854                        LOG.trace("rewriting producerSequenceIdTracker:" + metadata.producerSequenceIdTrackerLocation);
1855                    }
1856                }
1857                gcCandidateSet.remove(dataFileId);
1858                if (LOG.isTraceEnabled()) {
1859                    LOG.trace("gc candidates after producerSequenceIdTrackerLocation:" + metadata.producerSequenceIdTrackerLocation + ", " + gcCandidateSet);
1860                }
1861            }
1862
1863            if (metadata.ackMessageFileMapLocation != null) {
1864                int dataFileId = metadata.ackMessageFileMapLocation.getDataFileId();
1865                gcCandidateSet.remove(dataFileId);
1866                if (LOG.isTraceEnabled()) {
1867                    LOG.trace("gc candidates after ackMessageFileMapLocation:" + metadata.ackMessageFileMapLocation + ", " + gcCandidateSet);
1868                }
1869            }
1870
1871            // Don't GC files referenced by in-progress tx
1872            if (inProgressTxRange[0] != null) {
1873                for (int pendingTx=inProgressTxRange[0].getDataFileId(); pendingTx <= inProgressTxRange[1].getDataFileId(); pendingTx++) {
1874                    gcCandidateSet.remove(pendingTx);
1875                }
1876            }
1877            if (LOG.isTraceEnabled()) {
1878                LOG.trace("gc candidates after in progress tx range:" + Arrays.asList(inProgressTxRange) + ", " + gcCandidateSet);
1879            }
1880
1881            // Go through all the destinations to see if any of them can remove GC candidates.
1882            for (Entry<String, StoredDestination> entry : storedDestinations.entrySet()) {
1883                if( gcCandidateSet.isEmpty() ) {
1884                    break;
1885                }
1886
1887                // Use a visitor to cut down the number of pages that we load
1888                entry.getValue().locationIndex.visit(tx, new BTreeVisitor<Location, Long>() {
1889                    int last=-1;
1890                    @Override
1891                    public boolean isInterestedInKeysBetween(Location first, Location second) {
1892                        if( first==null ) {
1893                            SortedSet<Integer> subset = gcCandidateSet.headSet(second.getDataFileId()+1);
1894                            if( !subset.isEmpty() && subset.last() == second.getDataFileId() ) {
1895                                subset.remove(second.getDataFileId());
1896                            }
1897                            return !subset.isEmpty();
1898                        } else if( second==null ) {
1899                            SortedSet<Integer> subset = gcCandidateSet.tailSet(first.getDataFileId());
1900                            if( !subset.isEmpty() && subset.first() == first.getDataFileId() ) {
1901                                subset.remove(first.getDataFileId());
1902                            }
1903                            return !subset.isEmpty();
1904                        } else {
1905                            SortedSet<Integer> subset = gcCandidateSet.subSet(first.getDataFileId(), second.getDataFileId()+1);
1906                            if( !subset.isEmpty() && subset.first() == first.getDataFileId() ) {
1907                                subset.remove(first.getDataFileId());
1908                            }
1909                            if( !subset.isEmpty() && subset.last() == second.getDataFileId() ) {
1910                                subset.remove(second.getDataFileId());
1911                            }
1912                            return !subset.isEmpty();
1913                        }
1914                    }
1915
1916                    @Override
1917                    public void visit(List<Location> keys, List<Long> values) {
1918                        for (Location l : keys) {
1919                            int fileId = l.getDataFileId();
1920                            if( last != fileId ) {
1921                                gcCandidateSet.remove(fileId);
1922                                last = fileId;
1923                            }
1924                        }
1925                    }
1926                });
1927
1928                // Durable Subscription
1929                if (entry.getValue().subLocations != null) {
1930                    Iterator<Entry<String, Location>> iter = entry.getValue().subLocations.iterator(tx);
1931                    while (iter.hasNext()) {
1932                        Entry<String, Location> subscription = iter.next();
1933                        int dataFileId = subscription.getValue().getDataFileId();
1934
1935                        // Move subscription along if it has no outstanding messages that need ack'd
1936                        // and its in the last log file in the journal.
1937                        if (!gcCandidateSet.isEmpty() && gcCandidateSet.first() == dataFileId) {
1938                            final StoredDestination destination = entry.getValue();
1939                            final String subscriptionKey = subscription.getKey();
1940                            SequenceSet pendingAcks = destination.ackPositions.get(tx, subscriptionKey);
1941
1942                            // When pending is size one that is the next message Id meaning there
1943                            // are no pending messages currently.
1944                            if (pendingAcks == null || pendingAcks.isEmpty() ||
1945                                (pendingAcks.size() == 1 && pendingAcks.getTail().range() == 1)) {
1946
1947                                if (LOG.isTraceEnabled()) {
1948                                    LOG.trace("Found candidate for rewrite: sub {} on {} from file {}", subscriptionKey, entry.getKey(), dataFileId);
1949                                }
1950
1951                                final KahaSubscriptionCommand kahaSub =
1952                                    destination.subscriptions.get(tx, subscriptionKey);
1953                                destination.subLocations.put(
1954                                    tx, subscriptionKey, checkpointSubscriptionCommand(kahaSub));
1955
1956                                // Skips the remove from candidates if we rewrote the subscription
1957                                // in order to prevent duplicate subscription commands on recover.
1958                                // If another subscription is on the same file and isn't rewritten
1959                                // than it will remove the file from the set.
1960                                continue;
1961                            }
1962                        }
1963
1964                        if (LOG.isTraceEnabled()) {
1965                            final StoredDestination destination = entry.getValue();
1966                            final String subscriptionKey = subscription.getKey();
1967                            final SequenceSet pendingAcks = destination.ackPositions.get(tx, subscriptionKey);
1968                            LOG.trace("sub {} on {} in dataFile {} has pendingCount {}", subscriptionKey, entry.getKey(), dataFileId, pendingAcks.rangeSize()-1);
1969                        }
1970                        gcCandidateSet.remove(dataFileId);
1971                    }
1972                }
1973
1974                if (LOG.isTraceEnabled()) {
1975                    LOG.trace("gc candidates after dest:" + entry.getKey() + ", " + gcCandidateSet);
1976                }
1977            }
1978
1979            // check we are not deleting file with ack for in-use journal files
1980            if (LOG.isTraceEnabled()) {
1981                LOG.trace("gc candidates: " + gcCandidateSet);
1982                LOG.trace("ackMessageFileMap: " +  metadata.ackMessageFileMap);
1983            }
1984
1985            boolean ackMessageFileMapMod = false;
1986            Iterator<Integer> candidates = gcCandidateSet.iterator();
1987            while (candidates.hasNext()) {
1988                Integer candidate = candidates.next();
1989                Set<Integer> referencedFileIds = metadata.ackMessageFileMap.get(candidate);
1990                if (referencedFileIds != null) {
1991                    for (Integer referencedFileId : referencedFileIds) {
1992                        if (completeFileSet.contains(referencedFileId) && !gcCandidateSet.contains(referencedFileId)) {
1993                            // active file that is not targeted for deletion is referenced so don't delete
1994                            candidates.remove();
1995                            break;
1996                        }
1997                    }
1998                    if (gcCandidateSet.contains(candidate)) {
1999                        ackMessageFileMapMod |= (metadata.ackMessageFileMap.remove(candidate) != null);
2000                        metadata.ackMessageFileMapDirtyFlag.lazySet(true);
2001                    } else {
2002                        if (LOG.isTraceEnabled()) {
2003                            LOG.trace("not removing data file: " + candidate
2004                                    + " as contained ack(s) refer to referenced file: " + referencedFileIds);
2005                        }
2006                    }
2007                }
2008            }
2009
2010            if (!gcCandidateSet.isEmpty()) {
2011                LOG.debug("Cleanup removing the data files: {}", gcCandidateSet);
2012                for (Integer candidate : gcCandidateSet) {
2013                    for (Set<Integer> ackFiles : metadata.ackMessageFileMap.values()) {
2014                        ackMessageFileMapMod |= ackFiles.remove(candidate);
2015                        metadata.ackMessageFileMapDirtyFlag.lazySet(true);
2016                    }
2017                }
2018                if (ackMessageFileMapMod) {
2019                    checkpointUpdate(tx, false);
2020                }
2021            } else if (isEnableAckCompaction()) {
2022                if (++checkPointCyclesWithNoGC >= getCompactAcksAfterNoGC()) {
2023                    // First check length of journal to make sure it makes sense to even try.
2024                    //
2025                    // If there is only one journal file with Acks in it we don't need to move
2026                    // it since it won't be chained to any later logs.
2027                    //
2028                    // If the logs haven't grown since the last time then we need to compact
2029                    // otherwise there seems to still be room for growth and we don't need to incur
2030                    // the overhead.  Depending on configuration this check can be avoided and
2031                    // Ack compaction will run any time the store has not GC'd a journal file in
2032                    // the configured amount of cycles.
2033                    if (metadata.ackMessageFileMap.size() > 1 &&
2034                        (journalLogOnLastCompactionCheck == journal.getCurrentDataFileId() || isCompactAcksIgnoresStoreGrowth())) {
2035
2036                        LOG.trace("No files GC'd checking if threshold to ACK compaction has been met.");
2037                        try {
2038                            scheduler.execute(new AckCompactionRunner());
2039                        } catch (Exception ex) {
2040                            LOG.warn("Error on queueing the Ack Compactor", ex);
2041                        }
2042                    } else {
2043                        LOG.trace("Journal activity detected, no Ack compaction scheduled.");
2044                    }
2045
2046                    checkPointCyclesWithNoGC = 0;
2047                } else {
2048                    LOG.trace("Not yet time to check for compaction: {} of {} cycles",
2049                              checkPointCyclesWithNoGC, getCompactAcksAfterNoGC());
2050                }
2051
2052                journalLogOnLastCompactionCheck = journal.getCurrentDataFileId();
2053            }
2054        }
2055        MDC.remove("activemq.persistenceDir");
2056
2057        LOG.debug("Checkpoint done.");
2058        return gcCandidateSet;
2059    }
2060
    /**
     * Background task that forwards (rewrites) the still-needed ack records out of the
     * oldest eligible journal data file into a freshly reserved file, so the old file
     * stops being referenced via the ackMessageFileMap and can be GC'd by a later
     * checkpoint.  Scheduled from the checkpoint cycle when no journal files have been
     * GC'd for the configured number of cycles.
     */
    private final class AckCompactionRunner implements Runnable {

        @Override
        public void run() {

            // Data file id chosen as the compaction source; -1 means none found.
            int journalToAdvance = -1;
            // Data file ids referenced by the acks being moved; these references are
            // carried over to the new location of the forwarded acks.
            Set<Integer> journalLogsReferenced = new HashSet<>();

            //flag to know whether the ack forwarding completed without an exception
            boolean forwarded = false;

            try {
                //acquire the checkpoint lock to prevent other threads from
                //running a checkpoint while this is running
                //
                //Normally this task runs on the same executor as the checkpoint task
                //so this ack compaction runner wouldn't run at the same time as the checkpoint task.
                //
                //However, there are two cases where this isn't always true.
                //First, the checkpoint() method is public and can be called through the
                //PersistenceAdapter interface by someone at the same time this is running.
                //Second, a checkpoint is called during shutdown without using the executor.
                //
                //In the future it might be better to just remove the checkpointLock entirely
                //and only use the executor but this would need to be examined for any unintended
                //consequences
                checkpointLock.readLock().lock();

                try {

                    // Lock index to capture the ackMessageFileMap data
                    indexLock.writeLock().lock();

                    // Map keys might not be sorted, find the earliest log file to forward acks
                    // from and move only those, future cycles can chip away at more as needed.
                    // We won't move files that are themselves rewritten on a previous compaction.
                    List<Integer> journalFileIds = new ArrayList<>(metadata.ackMessageFileMap.keySet());
                    Collections.sort(journalFileIds);
                    for (Integer journalFileId : journalFileIds) {
                        DataFile current = journal.getDataFileById(journalFileId);
                        if (current != null && current.getTypeCode() != COMPACTED_JOURNAL_FILE) {
                            journalToAdvance = journalFileId;
                            break;
                        }
                    }

                    // Check if we found one, or if we only found the current file being written to.
                    if (journalToAdvance == -1 || blockedFromCompaction(journalToAdvance)) {
                        return;
                    }

                    journalLogsReferenced.addAll(metadata.ackMessageFileMap.get(journalToAdvance));

                } finally {
                    indexLock.writeLock().unlock();
                }

                try {
                    // Background rewrite of the old acks
                    forwardAllAcks(journalToAdvance, journalLogsReferenced);
                    forwarded = true;
                } catch (IOException ioe) {
                    LOG.error("Forwarding of acks failed", ioe);
                    brokerService.handleIOException(ioe);
                } catch (Throwable e) {
                    LOG.error("Forwarding of acks failed", e);
                    brokerService.handleIOException(IOExceptionSupport.create(e));
                }
            } finally {
                checkpointLock.readLock().unlock();
            }

            // Checkpoint outside the checkpointLock: checkpointUpdate(false) takes its own
            // locks and is only attempted when the forward completed successfully.
            try {
                if (forwarded) {
                    // Checkpoint with changes from the ackMessageFileMap
                    checkpointUpdate(false);
                }
            } catch (IOException ioe) {
                LOG.error("Checkpoint failed", ioe);
                brokerService.handleIOException(ioe);
            } catch (Throwable e) {
                LOG.error("Checkpoint failed", e);
                brokerService.handleIOException(IOExceptionSupport.create(e));
            }
        }
    }
2147
2148    // called with the index lock held
2149    private boolean blockedFromCompaction(int journalToAdvance) {
2150        // don't forward the current data file
2151        if (journalToAdvance == journal.getCurrentDataFileId()) {
2152            return true;
2153        }
2154        // don't forward any data file with inflight transaction records because it will whack the tx - data file link
2155        // in the ack map when all acks are migrated (now that the ack map is not just for acks)
2156        // TODO: prepare records can be dropped but completion records (maybe only commit outcomes) need to be migrated
2157        // as part of the forward work.
2158        Location[] inProgressTxRange = getInProgressTxLocationRange();
2159        if (inProgressTxRange[0] != null) {
2160            for (int pendingTx = inProgressTxRange[0].getDataFileId(); pendingTx <= inProgressTxRange[1].getDataFileId(); pendingTx++) {
2161                if (journalToAdvance == pendingTx) {
2162                    LOG.trace("Compaction target:{} blocked by inflight transaction records: {}", journalToAdvance, inProgressTxRange);
2163                    return true;
2164                }
2165            }
2166        }
2167        return false;
2168    }
2169
2170    private void forwardAllAcks(Integer journalToRead, Set<Integer> journalLogsReferenced) throws IllegalStateException, IOException {
2171        LOG.trace("Attempting to move all acks in journal:{} to the front. Referenced files:{}", journalToRead, journalLogsReferenced);
2172
2173        DataFile forwardsFile = journal.reserveDataFile();
2174        forwardsFile.setTypeCode(COMPACTED_JOURNAL_FILE);
2175        LOG.trace("Reserved file for forwarded acks: {}", forwardsFile);
2176
2177        Map<Integer, Set<Integer>> updatedAckLocations = new HashMap<>();
2178
2179        try (TargetedDataFileAppender appender = new TargetedDataFileAppender(journal, forwardsFile);) {
2180            KahaRewrittenDataFileCommand compactionMarker = new KahaRewrittenDataFileCommand();
2181            compactionMarker.setSourceDataFileId(journalToRead);
2182            compactionMarker.setRewriteType(forwardsFile.getTypeCode());
2183
2184            ByteSequence payload = toByteSequence(compactionMarker);
2185            appender.storeItem(payload, Journal.USER_RECORD_TYPE, false);
2186            LOG.trace("Marked ack rewrites file as replacing file: {}", journalToRead);
2187
2188            final Location limit = new Location(journalToRead + 1, 0);
2189            Location nextLocation = getNextLocationForAckForward(new Location(journalToRead, 0), limit);
2190            while (nextLocation != null) {
2191                JournalCommand<?> command = null;
2192                try {
2193                    command = load(nextLocation);
2194                } catch (IOException ex) {
2195                    LOG.trace("Error loading command during ack forward: {}", nextLocation);
2196                }
2197
2198                if (shouldForward(command)) {
2199                    payload = toByteSequence(command);
2200                    Location location = appender.storeItem(payload, Journal.USER_RECORD_TYPE, false);
2201                    updatedAckLocations.put(location.getDataFileId(), journalLogsReferenced);
2202                }
2203
2204                nextLocation = getNextLocationForAckForward(nextLocation, limit);
2205            }
2206        }
2207
2208        LOG.trace("ACKS forwarded, updates for ack locations: {}", updatedAckLocations);
2209
2210        // Lock index while we update the ackMessageFileMap.
2211        indexLock.writeLock().lock();
2212
2213        // Update the ack map with the new locations of the acks
2214        for (Entry<Integer, Set<Integer>> entry : updatedAckLocations.entrySet()) {
2215            Set<Integer> referenceFileIds = metadata.ackMessageFileMap.get(entry.getKey());
2216            if (referenceFileIds == null) {
2217                referenceFileIds = new HashSet<>();
2218                referenceFileIds.addAll(entry.getValue());
2219                metadata.ackMessageFileMap.put(entry.getKey(), referenceFileIds);
2220                metadata.ackMessageFileMapDirtyFlag.lazySet(true);
2221            } else {
2222                referenceFileIds.addAll(entry.getValue());
2223            }
2224        }
2225
2226        // remove the old location data from the ack map so that the old journal log file can
2227        // be removed on next GC.
2228        metadata.ackMessageFileMap.remove(journalToRead);
2229        metadata.ackMessageFileMapDirtyFlag.lazySet(true);
2230
2231        indexLock.writeLock().unlock();
2232
2233        LOG.trace("ACK File Map following updates: {}", metadata.ackMessageFileMap);
2234    }
2235
2236    private boolean shouldForward(JournalCommand<?> command) {
2237        if (command == null) {
2238            return false;
2239        }
2240
2241        return (command instanceof KahaRemoveMessageCommand || command instanceof KahaCommitCommand);
2242    }
2243
2244    private Location getNextLocationForAckForward(final Location nextLocation, final Location limit) {
2245        //getNextLocation() can throw an IOException, we should handle it and set
2246        //nextLocation to null and abort gracefully
2247        //Should not happen in the normal case
2248        Location location = null;
2249        try {
2250            location = journal.getNextLocation(nextLocation, limit);
2251        } catch (IOException e) {
2252            LOG.warn("Failed to load next journal location after: {}, reason: {}", nextLocation, e);
2253            if (LOG.isDebugEnabled()) {
2254                LOG.debug("Failed to load next journal location after: {}", nextLocation, e);
2255            }
2256        }
2257        return location;
2258    }
2259
2260    final Runnable nullCompletionCallback = new Runnable() {
2261        @Override
2262        public void run() {
2263        }
2264    };
2265
2266    private Location checkpointProducerAudit() throws IOException {
2267        if (metadata.producerSequenceIdTracker == null || metadata.producerSequenceIdTracker.modified()) {
2268            ByteArrayOutputStream baos = new ByteArrayOutputStream();
2269            ObjectOutputStream oout = new ObjectOutputStream(baos);
2270            oout.writeObject(metadata.producerSequenceIdTracker);
2271            oout.flush();
2272            oout.close();
2273            // using completion callback allows a disk sync to be avoided when enableJournalDiskSyncs = false
2274            Location location = store(new KahaProducerAuditCommand().setAudit(new Buffer(baos.toByteArray())), nullCompletionCallback);
2275            try {
2276                location.getLatch().await();
2277                if (location.getException().get() != null) {
2278                    throw location.getException().get();
2279                }
2280            } catch (InterruptedException e) {
2281                throw new InterruptedIOException(e.toString());
2282            }
2283            return location;
2284        }
2285        return metadata.producerSequenceIdTrackerLocation;
2286    }
2287
2288    private Location checkpointAckMessageFileMap() throws IOException {
2289        ByteArrayOutputStream baos = new ByteArrayOutputStream();
2290        ObjectOutputStream oout = new ObjectOutputStream(baos);
2291        oout.writeObject(metadata.ackMessageFileMap);
2292        oout.flush();
2293        oout.close();
2294        // using completion callback allows a disk sync to be avoided when enableJournalDiskSyncs = false
2295        Location location = store(new KahaAckMessageFileMapCommand().setAckMessageFileMap(new Buffer(baos.toByteArray())), nullCompletionCallback);
2296        try {
2297            location.getLatch().await();
2298        } catch (InterruptedException e) {
2299            throw new InterruptedIOException(e.toString());
2300        }
2301        return location;
2302    }
2303
2304    private Location checkpointSubscriptionCommand(KahaSubscriptionCommand subscription) throws IOException {
2305
2306        ByteSequence sequence = toByteSequence(subscription);
2307        Location location = journal.write(sequence, nullCompletionCallback) ;
2308
2309        try {
2310            location.getLatch().await();
2311        } catch (InterruptedException e) {
2312            throw new InterruptedIOException(e.toString());
2313        }
2314        return location;
2315    }
2316
    /**
     * Returns the set of journal data file ids currently being replicated.  The live
     * internal set is returned (no defensive copy), so callers share its mutations.
     */
    public HashSet<Integer> getJournalFilesBeingReplicated() {
        return journalFilesBeingReplicated;
    }
2320
2321    // /////////////////////////////////////////////////////////////////
2322    // StoredDestination related implementation methods.
2323    // /////////////////////////////////////////////////////////////////
2324
    // Cache of destination key -> StoredDestination index handles, populated lazily by
    // getStoredDestination(). NOTE(review): appears to rely on the index lock for
    // thread-safety (plain HashMap) — confirm all access paths hold it.
    protected final HashMap<String, StoredDestination> storedDestinations = new HashMap<>();
2326
2327    static class MessageKeys {
2328        final String messageId;
2329        final Location location;
2330
2331        public MessageKeys(String messageId, Location location) {
2332            this.messageId=messageId;
2333            this.location=location;
2334        }
2335
2336        @Override
2337        public String toString() {
2338            return "["+messageId+","+location+"]";
2339        }
2340    }
2341
2342    protected class MessageKeysMarshaller extends VariableMarshaller<MessageKeys> {
2343        final LocationSizeMarshaller locationSizeMarshaller = new LocationSizeMarshaller();
2344
2345        @Override
2346        public MessageKeys readPayload(DataInput dataIn) throws IOException {
2347            return new MessageKeys(dataIn.readUTF(), locationSizeMarshaller.readPayload(dataIn));
2348        }
2349
2350        @Override
2351        public void writePayload(MessageKeys object, DataOutput dataOut) throws IOException {
2352            dataOut.writeUTF(object.messageId);
2353            locationSizeMarshaller.writePayload(object.location, dataOut);
2354        }
2355    }
2356
2357    class LastAck {
2358        long lastAckedSequence;
2359        byte priority;
2360
2361        public LastAck(LastAck source) {
2362            this.lastAckedSequence = source.lastAckedSequence;
2363            this.priority = source.priority;
2364        }
2365
2366        public LastAck() {
2367            this.priority = MessageOrderIndex.HI;
2368        }
2369
2370        public LastAck(long ackLocation) {
2371            this.lastAckedSequence = ackLocation;
2372            this.priority = MessageOrderIndex.LO;
2373        }
2374
2375        public LastAck(long ackLocation, byte priority) {
2376            this.lastAckedSequence = ackLocation;
2377            this.priority = priority;
2378        }
2379
2380        @Override
2381        public String toString() {
2382            return "[" + lastAckedSequence + ":" + priority + "]";
2383        }
2384    }
2385
    /**
     * Fixed-size (9 byte) marshaller for {@link LastAck}: an 8-byte sequence followed
     * by a 1-byte priority.  Reading is version-aware for stores written before the
     * priority byte existed.
     */
    protected class LastAckMarshaller implements Marshaller<LastAck> {

        @Override
        public void writePayload(LastAck object, DataOutput dataOut) throws IOException {
            dataOut.writeLong(object.lastAckedSequence);
            dataOut.writeByte(object.priority);
        }

        @Override
        public LastAck readPayload(DataInput dataIn) throws IOException {
            LastAck lastAcked = new LastAck();
            lastAcked.lastAckedSequence = dataIn.readLong();
            // The priority byte was added in store version 3; older stores keep the
            // default priority assigned by the LastAck() constructor.
            if (metadata.version >= 3) {
                lastAcked.priority = dataIn.readByte();
            }
            return lastAcked;
        }

        @Override
        public int getFixedSize() {
            return 9; // 8 bytes sequence + 1 byte priority
        }

        @Override
        public LastAck deepCopy(LastAck source) {
            return new LastAck(source);
        }

        @Override
        public boolean isDeepCopySupported() {
            return true;
        }
    }
2419
    /**
     * Page-file-backed holder for a destination's {@link MessageStoreStatistics},
     * stored on a single page.  Access is synchronized on this instance.
     */
    class StoredMessageStoreStatistics {
        private PageFile pageFile;
        private Page<MessageStoreStatistics> page;
        private long pageId;
        // Guards the one-time free-page initialization in load().
        private AtomicBoolean loaded = new AtomicBoolean();
        private MessageStoreStatisticsMarshaller messageStoreStatisticsMarshaller = new MessageStoreStatisticsMarshaller();

        StoredMessageStoreStatistics(PageFile pageFile, long pageId) {
            this.pageId = pageId;
            this.pageFile = pageFile;
        }

        StoredMessageStoreStatistics(PageFile pageFile, Page page) {
            this(pageFile, page.getPageId());
        }

        public long getPageId() {
            return pageId;
        }

        /**
         * Loads the statistics page.  On the first call only, the page is inspected
         * without a marshaller and, if still free, initialized with a null payload so
         * marshalled loads succeed.  Every call then (re)loads with the marshaller so
         * {@link #get(Transaction)} always reflects the stored state.
         */
        synchronized void load(Transaction tx) throws IOException {
            if (loaded.compareAndSet(false, true)) {
                page = tx.load(pageId, null);

                if (page.getType() == Page.PAGE_FREE_TYPE) {
                    page.set(null);
                    tx.store(page, messageStoreStatisticsMarshaller, true);
                }
            }
            page = tx.load(pageId, messageStoreStatisticsMarshaller);
        }

        // Returns the stored statistics, loading the page first (may be null payload).
        synchronized MessageStoreStatistics get(Transaction tx) throws IOException {
            load(tx);
            return page.get();
        }

        // Replaces the stored statistics on the page within the given transaction.
        synchronized void put(Transaction tx, MessageStoreStatistics storeStatistics) throws IOException {
            if (page == null) {
                page = tx.load(pageId, messageStoreStatisticsMarshaller);
            }

            page.set(storeStatistics);

            tx.store(page, messageStoreStatisticsMarshaller, true);
        }
    }
    /**
     * Per-destination bundle of index handles: message ordering, location and id
     * lookups, plus (for topics only) subscription-related indexes.  Instances are
     * cached in {@code storedDestinations}.
     */
    class StoredDestination {

        // Priority-aware sequence -> MessageKeys ordering of stored messages.
        MessageOrderIndex orderIndex = new MessageOrderIndex();
        // Journal location -> message sequence, used for journal GC reference counting.
        BTreeIndex<Location, Long> locationIndex;
        // Message id -> message sequence, used for duplicate detection and acks.
        BTreeIndex<String, Long> messageIdIndex;

        // These bits are only set for Topics
        BTreeIndex<String, KahaSubscriptionCommand> subscriptions;
        BTreeIndex<String, LastAck> subscriptionAcks;
        HashMap<String, MessageOrderCursor> subscriptionCursors;
        // Subscription key -> set of message sequences still awaiting ack.
        ListIndex<String, SequenceSet> ackPositions;
        // Subscription key -> journal location of the subscription command.
        ListIndex<String, Location> subLocations;

        // Transient data used to track which Messages are no longer needed.
        final HashSet<String> subscriptionCache = new LinkedHashSet<>();

        StoredMessageStoreStatistics messageStoreStatistics;

        // Delegates pending-add bookkeeping to the order index.
        public void trackPendingAdd(Long seq) {
            orderIndex.trackPendingAdd(seq);
        }

        public void trackPendingAddComplete(Long seq) {
            orderIndex.trackPendingAddComplete(seq);
        }

        @Override
        public String toString() {
            return "nextSeq:" + orderIndex.nextMessageId + ",lastRet:" + orderIndex.cursor + ",pending:" + orderIndex.pendingAdditions.size();
        }
    }
2498
2499    protected  class MessageStoreStatisticsMarshaller extends VariableMarshaller<MessageStoreStatistics> {
2500
2501        @Override
2502        public void writePayload(final MessageStoreStatistics object, final DataOutput dataOut) throws IOException {
2503            dataOut.writeBoolean(null != object);
2504            if (object != null) {
2505                dataOut.writeLong(object.getMessageCount().getCount());
2506                dataOut.writeLong(object.getMessageSize().getTotalSize());
2507                dataOut.writeLong(object.getMessageSize().getMaxSize());
2508                dataOut.writeLong(object.getMessageSize().getMinSize());
2509                dataOut.writeLong(object.getMessageSize().getCount());
2510            }
2511        }
2512
2513        @Override
2514        public MessageStoreStatistics readPayload(final DataInput dataIn) throws IOException {
2515
2516            if (!dataIn.readBoolean()) {
2517                return null;
2518            }
2519
2520            MessageStoreStatistics messageStoreStatistics = new MessageStoreStatistics();
2521            messageStoreStatistics.getMessageCount().setCount(dataIn.readLong());
2522            messageStoreStatistics.getMessageSize().setTotalSize(dataIn.readLong());
2523            messageStoreStatistics.getMessageSize().setMaxSize(dataIn.readLong());
2524            messageStoreStatistics.getMessageSize().setMinSize(dataIn.readLong());
2525            messageStoreStatistics.getMessageSize().setCount(dataIn.readLong());
2526
2527            return messageStoreStatistics;
2528        }
2529    }
2530
    /**
     * (De)serializes {@link StoredDestination} index metadata (page ids only — the
     * indexes themselves live in the page file).  readPayload also performs in-place
     * upgrades for stores written by older metadata versions; the dataIn reads inside
     * and around the upgrade closures are strictly order-dependent and must match
     * writePayload's field order for the version being read.
     */
    protected class StoredDestinationMarshaller extends VariableMarshaller<StoredDestination> {

        final MessageKeysMarshaller messageKeysMarshaller = new MessageKeysMarshaller();

        @Override
        public StoredDestination readPayload(final DataInput dataIn) throws IOException {
            final StoredDestination value = new StoredDestination();
            value.orderIndex.defaultPriorityIndex = new BTreeIndex<>(pageFile, dataIn.readLong());
            value.locationIndex = new BTreeIndex<>(pageFile, dataIn.readLong());
            value.messageIdIndex = new BTreeIndex<>(pageFile, dataIn.readLong());

            // The boolean marks a topic record carrying subscription indexes.
            if (dataIn.readBoolean()) {
                value.subscriptions = new BTreeIndex<>(pageFile, dataIn.readLong());
                value.subscriptionAcks = new BTreeIndex<>(pageFile, dataIn.readLong());
                if (metadata.version >= 4) {
                    value.ackPositions = new ListIndex<>(pageFile, dataIn.readLong());
                } else {
                    // upgrade
                    pageFile.tx().execute(new Transaction.Closure<IOException>() {
                        @Override
                        public void execute(Transaction tx) throws IOException {
                            LinkedHashMap<String, SequenceSet> temp = new LinkedHashMap<>();

                            if (metadata.version >= 3) {
                                // migrate: version 3 stored ack positions inverted,
                                // as sequence -> set of subscription keys.
                                BTreeIndex<Long, HashSet<String>> oldAckPositions =
                                        new BTreeIndex<>(pageFile, dataIn.readLong());
                                oldAckPositions.setKeyMarshaller(LongMarshaller.INSTANCE);
                                oldAckPositions.setValueMarshaller(HashSetStringMarshaller.INSTANCE);
                                oldAckPositions.load(tx);


                                // Do the initial build of the data in memory before writing into the store
                                // based Ack Positions List to avoid a lot of disk thrashing.
                                Iterator<Entry<Long, HashSet<String>>> iterator = oldAckPositions.iterator(tx);
                                while (iterator.hasNext()) {
                                    Entry<Long, HashSet<String>> entry = iterator.next();

                                    for(String subKey : entry.getValue()) {
                                        SequenceSet pendingAcks = temp.get(subKey);
                                        if (pendingAcks == null) {
                                            pendingAcks = new SequenceSet();
                                            temp.put(subKey, pendingAcks);
                                        }

                                        pendingAcks.add(entry.getKey());
                                    }
                                }
                            }
                            // Now move the pending messages to ack data into the store backed
                            // structure.
                            value.ackPositions = new ListIndex<>(pageFile, tx.allocate());
                            value.ackPositions.setKeyMarshaller(StringMarshaller.INSTANCE);
                            value.ackPositions.setValueMarshaller(SequenceSet.Marshaller.INSTANCE);
                            value.ackPositions.load(tx);
                            for(String subscriptionKey : temp.keySet()) {
                                value.ackPositions.put(tx, subscriptionKey, temp.get(subscriptionKey));
                            }

                        }
                    });
                }

                if (metadata.version >= 5) {
                    value.subLocations = new ListIndex<>(pageFile, dataIn.readLong());
                } else {
                    // upgrade: pre-v5 stores had no subscription-location index;
                    // allocate an empty one.
                    pageFile.tx().execute(new Transaction.Closure<IOException>() {
                        @Override
                        public void execute(Transaction tx) throws IOException {
                            value.subLocations = new ListIndex<>(pageFile, tx.allocate());
                            value.subLocations.setKeyMarshaller(StringMarshaller.INSTANCE);
                            value.subLocations.setValueMarshaller(LocationMarshaller.INSTANCE);
                            value.subLocations.load(tx);
                        }
                    });
                }
            }

            if (metadata.version >= 2) {
                value.orderIndex.lowPriorityIndex = new BTreeIndex<>(pageFile, dataIn.readLong());
                value.orderIndex.highPriorityIndex = new BTreeIndex<>(pageFile, dataIn.readLong());
            } else {
                // upgrade: pre-v2 stores had only the default priority index;
                // allocate empty low/high priority indexes.
                pageFile.tx().execute(new Transaction.Closure<IOException>() {
                    @Override
                    public void execute(Transaction tx) throws IOException {
                        value.orderIndex.lowPriorityIndex = new BTreeIndex<>(pageFile, tx.allocate());
                        value.orderIndex.lowPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
                        value.orderIndex.lowPriorityIndex.setValueMarshaller(messageKeysMarshaller);
                        value.orderIndex.lowPriorityIndex.load(tx);

                        value.orderIndex.highPriorityIndex = new BTreeIndex<>(pageFile, tx.allocate());
                        value.orderIndex.highPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
                        value.orderIndex.highPriorityIndex.setValueMarshaller(messageKeysMarshaller);
                        value.orderIndex.highPriorityIndex.load(tx);
                    }
                });
            }

            if (metadata.version >= 7) {
                value.messageStoreStatistics = new StoredMessageStoreStatistics(pageFile, dataIn.readLong());
            } else {
                // upgrade: pre-v7 stores had no persisted statistics page.
                pageFile.tx().execute(tx -> {
                    value.messageStoreStatistics = new StoredMessageStoreStatistics(pageFile, tx.allocate());
                    value.messageStoreStatistics.load(tx);
                });
            }

            return value;
        }

        @Override
        public void writePayload(StoredDestination value, DataOutput dataOut) throws IOException {
            dataOut.writeLong(value.orderIndex.defaultPriorityIndex.getPageId());
            dataOut.writeLong(value.locationIndex.getPageId());
            dataOut.writeLong(value.messageIdIndex.getPageId());
            if (value.subscriptions != null) {
                dataOut.writeBoolean(true);
                dataOut.writeLong(value.subscriptions.getPageId());
                dataOut.writeLong(value.subscriptionAcks.getPageId());
                dataOut.writeLong(value.ackPositions.getHeadPageId());
                dataOut.writeLong(value.subLocations.getHeadPageId());
            } else {
                dataOut.writeBoolean(false);
            }
            dataOut.writeLong(value.orderIndex.lowPriorityIndex.getPageId());
            dataOut.writeLong(value.orderIndex.highPriorityIndex.getPageId());
            dataOut.writeLong(value.messageStoreStatistics.getPageId());
        }
    }
2662
2663    static class KahaSubscriptionCommandMarshaller extends VariableMarshaller<KahaSubscriptionCommand> {
2664        final static KahaSubscriptionCommandMarshaller INSTANCE = new KahaSubscriptionCommandMarshaller();
2665
2666        @Override
2667        public KahaSubscriptionCommand readPayload(DataInput dataIn) throws IOException {
2668            KahaSubscriptionCommand rc = new KahaSubscriptionCommand();
2669            rc.mergeFramed((InputStream)dataIn);
2670            return rc;
2671        }
2672
2673        @Override
2674        public void writePayload(KahaSubscriptionCommand object, DataOutput dataOut) throws IOException {
2675            object.writeFramed((OutputStream)dataOut);
2676        }
2677    }
2678
2679    protected StoredDestination getStoredDestination(KahaDestination destination, Transaction tx) throws IOException {
2680        String key = key(destination);
2681        StoredDestination rc = storedDestinations.get(key);
2682        if (rc == null) {
2683            boolean topic = destination.getType() == KahaDestination.DestinationType.TOPIC || destination.getType() == KahaDestination.DestinationType.TEMP_TOPIC;
2684            rc = loadStoredDestination(tx, key, topic);
2685            // Cache it. We may want to remove/unload destinations from the
2686            // cache that are not used for a while
2687            // to reduce memory usage.
2688            storedDestinations.put(key, rc);
2689        }
2690        return rc;
2691    }
2692
2693    protected MessageStoreStatistics getStoredMessageStoreStatistics(KahaDestination destination, Transaction tx) throws IOException {
2694        StoredDestination sd = getStoredDestination(destination, tx);
2695        return  sd != null && sd.messageStoreStatistics != null ? sd.messageStoreStatistics.get(tx) : null;
2696    }
2697
2698    protected StoredDestination getExistingStoredDestination(KahaDestination destination, Transaction tx) throws IOException {
2699        String key = key(destination);
2700        StoredDestination rc = storedDestinations.get(key);
2701        if (rc == null && metadata.destinations.containsKey(tx, key)) {
2702            rc = getStoredDestination(destination, tx);
2703        }
2704        return rc;
2705    }
2706
2707    /**
2708     * @param tx
2709     * @param key
2710     * @param topic
2711     * @return
2712     * @throws IOException
2713     */
2714    private StoredDestination loadStoredDestination(Transaction tx, String key, boolean topic) throws IOException {
2715        // Try to load the existing indexes..
2716        StoredDestination rc = metadata.destinations.get(tx, key);
2717        if (rc == null) {
2718            // Brand new destination.. allocate indexes for it.
2719            rc = new StoredDestination();
2720            rc.orderIndex.allocate(tx);
2721            rc.locationIndex = new BTreeIndex<>(pageFile, tx.allocate());
2722            rc.messageIdIndex = new BTreeIndex<>(pageFile, tx.allocate());
2723
2724            if (topic) {
2725                rc.subscriptions = new BTreeIndex<>(pageFile, tx.allocate());
2726                rc.subscriptionAcks = new BTreeIndex<>(pageFile, tx.allocate());
2727                rc.ackPositions = new ListIndex<>(pageFile, tx.allocate());
2728                rc.subLocations = new ListIndex<>(pageFile, tx.allocate());
2729            }
2730
2731            rc.messageStoreStatistics = new StoredMessageStoreStatistics(pageFile, tx.allocate());
2732
2733            metadata.destinations.put(tx, key, rc);
2734        }
2735
2736        rc.messageStoreStatistics.load(tx);
2737
2738        // Configure the marshalers and load.
2739        rc.orderIndex.load(tx);
2740
2741        // Figure out the next key using the last entry in the destination.
2742        rc.orderIndex.configureLast(tx);
2743
2744        rc.locationIndex.setKeyMarshaller(new LocationSizeMarshaller());
2745        rc.locationIndex.setValueMarshaller(LongMarshaller.INSTANCE);
2746        rc.locationIndex.load(tx);
2747
2748        rc.messageIdIndex.setKeyMarshaller(StringMarshaller.INSTANCE);
2749        rc.messageIdIndex.setValueMarshaller(LongMarshaller.INSTANCE);
2750        rc.messageIdIndex.load(tx);
2751
2752        //go through an upgrade old index if older than version 6
2753        if (metadata.version < 6) {
2754            for (Iterator<Entry<Location, Long>> iterator = rc.locationIndex.iterator(tx); iterator.hasNext(); ) {
2755                Entry<Location, Long> entry = iterator.next();
2756                // modify so it is upgraded
2757                rc.locationIndex.put(tx, entry.getKey(), entry.getValue());
2758            }
2759            //upgrade the order index
2760            for (Iterator<Entry<Long, MessageKeys>> iterator = rc.orderIndex.iterator(tx); iterator.hasNext(); ) {
2761                Entry<Long, MessageKeys> entry = iterator.next();
2762                //call get so that the last priority is updated
2763                rc.orderIndex.get(tx, entry.getKey());
2764                rc.orderIndex.put(tx, rc.orderIndex.lastGetPriority(), entry.getKey(), entry.getValue());
2765            }
2766        }
2767
2768        // If it was a topic...
2769        if (topic) {
2770
2771            rc.subscriptions.setKeyMarshaller(StringMarshaller.INSTANCE);
2772            rc.subscriptions.setValueMarshaller(KahaSubscriptionCommandMarshaller.INSTANCE);
2773            rc.subscriptions.load(tx);
2774
2775            rc.subscriptionAcks.setKeyMarshaller(StringMarshaller.INSTANCE);
2776            rc.subscriptionAcks.setValueMarshaller(new LastAckMarshaller());
2777            rc.subscriptionAcks.load(tx);
2778
2779            rc.ackPositions.setKeyMarshaller(StringMarshaller.INSTANCE);
2780            rc.ackPositions.setValueMarshaller(SequenceSet.Marshaller.INSTANCE);
2781            rc.ackPositions.load(tx);
2782
2783            rc.subLocations.setKeyMarshaller(StringMarshaller.INSTANCE);
2784            rc.subLocations.setValueMarshaller(LocationMarshaller.INSTANCE);
2785            rc.subLocations.load(tx);
2786
2787            rc.subscriptionCursors = new HashMap<>();
2788
2789            if (metadata.version < 3) {
2790
2791                // on upgrade need to fill ackLocation with available messages past last ack
2792                for (Iterator<Entry<String, LastAck>> iterator = rc.subscriptionAcks.iterator(tx); iterator.hasNext(); ) {
2793                    Entry<String, LastAck> entry = iterator.next();
2794                    for (Iterator<Entry<Long, MessageKeys>> orderIterator =
2795                            rc.orderIndex.iterator(tx, new MessageOrderCursor(entry.getValue().lastAckedSequence)); orderIterator.hasNext(); ) {
2796                        Long sequence = orderIterator.next().getKey();
2797                        addAckLocation(tx, rc, sequence, entry.getKey());
2798                    }
2799                    // modify so it is upgraded
2800                    rc.subscriptionAcks.put(tx, entry.getKey(), entry.getValue());
2801                }
2802            }
2803
2804            // Configure the subscription cache
2805            for (Iterator<Entry<String, LastAck>> iterator = rc.subscriptionAcks.iterator(tx); iterator.hasNext(); ) {
2806                Entry<String, LastAck> entry = iterator.next();
2807                rc.subscriptionCache.add(entry.getKey());
2808            }
2809
2810            if (rc.orderIndex.nextMessageId == 0) {
2811                // check for existing durable sub all acked out - pull next seq from acks as messages are gone
2812                if (!rc.subscriptionAcks.isEmpty(tx)) {
2813                    for (Iterator<Entry<String, LastAck>> iterator = rc.subscriptionAcks.iterator(tx); iterator.hasNext();) {
2814                        Entry<String, LastAck> entry = iterator.next();
2815                        rc.orderIndex.nextMessageId =
2816                                Math.max(rc.orderIndex.nextMessageId, entry.getValue().lastAckedSequence +1);
2817                    }
2818                }
2819            } else {
2820                // update based on ackPositions for unmatched, last entry is always the next
2821                Iterator<Entry<String, SequenceSet>> subscriptions = rc.ackPositions.iterator(tx);
2822                while (subscriptions.hasNext()) {
2823                    Entry<String, SequenceSet> subscription = subscriptions.next();
2824                    SequenceSet pendingAcks = subscription.getValue();
2825                    if (pendingAcks != null && !pendingAcks.isEmpty()) {
2826                        for (Long sequenceId : pendingAcks) {
2827                            rc.orderIndex.nextMessageId = Math.max(rc.orderIndex.nextMessageId, sequenceId);
2828                        }
2829                    }
2830                }
2831            }
2832        }
2833
2834        if (metadata.version < VERSION) {
2835            // store again after upgrade
2836            metadata.destinations.put(tx, key, rc);
2837        }
2838        return rc;
2839    }
2840
2841    /**
2842     * Clear the counter for the destination, if one exists.
2843     *
2844     * @param kahaDestination
2845     */
2846    protected void clearStoreStats(KahaDestination kahaDestination) {
2847        String key = key(kahaDestination);
2848        MessageStoreStatistics storeStats = getStoreStats(key);
2849        MessageStoreSubscriptionStatistics subStats = getSubStats(key);
2850        if (storeStats != null) {
2851            storeStats.reset();
2852        }
2853        if (subStats != null) {
2854            subStats.reset();
2855        }
2856    }
2857
2858    /**
2859     * Update MessageStoreStatistics
2860     *
2861     * @param kahaDestination
2862     * @param size
2863     */
2864    protected void incrementAndAddSizeToStoreStat(Transaction tx, KahaDestination kahaDestination, long size) throws IOException {
2865        StoredDestination sd = getStoredDestination(kahaDestination, tx);
2866        incrementAndAddSizeToStoreStat(tx, key(kahaDestination), sd, size);
2867    }
2868
2869    protected void incrementAndAddSizeToStoreStat(Transaction tx, String kahaDestKey, StoredDestination sd, long size) throws IOException {
2870        MessageStoreStatistics storeStats = getStoreStats(kahaDestKey);
2871        if (storeStats != null) {
2872            incrementAndAddSizeToStoreStat(size, storeStats);
2873            sd.messageStoreStatistics.put(tx, storeStats);
2874        } else if (sd != null){
2875            // During the recovery the storeStats is null
2876            MessageStoreStatistics storedStoreStats = sd.messageStoreStatistics.get(tx);
2877            if (storedStoreStats == null) {
2878                storedStoreStats = new MessageStoreStatistics();
2879            }
2880            incrementAndAddSizeToStoreStat(size, storedStoreStats);
2881            sd.messageStoreStatistics.put(tx, storedStoreStats);
2882        }
2883    }
2884
2885    private void incrementAndAddSizeToStoreStat(final long size, final MessageStoreStatistics storedStoreStats) {
2886        storedStoreStats.getMessageCount().increment();
2887        if (size > 0) {
2888            storedStoreStats.getMessageSize().addSize(size);
2889        }
2890    }
2891
2892    protected void decrementAndSubSizeToStoreStat(Transaction tx, KahaDestination kahaDestination, long size) throws IOException {
2893        StoredDestination sd = getStoredDestination(kahaDestination, tx);
2894        decrementAndSubSizeToStoreStat(tx, key(kahaDestination), sd,size);
2895    }
2896
2897    protected void decrementAndSubSizeToStoreStat(Transaction tx, String kahaDestKey, StoredDestination sd, long size) throws IOException {
2898        MessageStoreStatistics storeStats = getStoreStats(kahaDestKey);
2899        if (storeStats != null) {
2900            decrementAndSubSizeToStoreStat(size, storeStats);
2901            sd.messageStoreStatistics.put(tx, storeStats);
2902        } else if (sd != null){
2903            // During the recovery the storeStats is null
2904            MessageStoreStatistics storedStoreStats = sd.messageStoreStatistics.get(tx);
2905            if (storedStoreStats == null) {
2906                storedStoreStats = new MessageStoreStatistics();
2907            }
2908            decrementAndSubSizeToStoreStat(size, storedStoreStats);
2909            sd.messageStoreStatistics.put(tx, storedStoreStats);
2910        }
2911    }
2912
2913    private void decrementAndSubSizeToStoreStat(final long size, final MessageStoreStatistics storedStoreStats) {
2914        storedStoreStats.getMessageCount().decrement();
2915
2916        if (size > 0) {
2917            storedStoreStats.getMessageSize().addSize(-size);
2918        }
2919    }
2920
    /**
     * Increments the per-subscription pending statistics for a destination,
     * delegating with the destination's cache key.
     */
    protected void incrementAndAddSizeToStoreStat(KahaDestination kahaDestination, String subKey, long size) {
        incrementAndAddSizeToStoreStat(key(kahaDestination), subKey, size);
    }
2924
2925    protected void incrementAndAddSizeToStoreStat(String kahaDestKey, String subKey, long size) {
2926        if (enableSubscriptionStatistics) {
2927            MessageStoreSubscriptionStatistics subStats = getSubStats(kahaDestKey);
2928            if (subStats != null && subKey != null) {
2929                subStats.getMessageCount(subKey).increment();
2930                if (size > 0) {
2931                    subStats.getMessageSize(subKey).addSize(size);
2932                }
2933            }
2934        }
2935    }
2936
2937
2938    protected void decrementAndSubSizeToStoreStat(String kahaDestKey, String subKey, long size) {
2939        if (enableSubscriptionStatistics) {
2940            MessageStoreSubscriptionStatistics subStats = getSubStats(kahaDestKey);
2941            if (subStats != null && subKey != null) {
2942                subStats.getMessageCount(subKey).decrement();
2943                if (size > 0) {
2944                    subStats.getMessageSize(subKey).addSize(-size);
2945                }
2946            }
2947        }
2948    }
2949
    /**
     * Decrements the per-subscription pending statistics for a destination,
     * delegating with the destination's cache key.
     */
    protected void decrementAndSubSizeToStoreStat(KahaDestination kahaDestination, String subKey, long size) {
        decrementAndSubSizeToStoreStat(key(kahaDestination), subKey, size);
    }
2953
2954    /**
2955     * This is a map to cache MessageStores for a specific
2956     * KahaDestination key
2957     */
2958    protected final ConcurrentMap<String, MessageStore> storeCache =
2959            new ConcurrentHashMap<>();
2960
2961    /**
2962     * Locate the storeMessageSize counter for this KahaDestination
2963     */
2964    protected MessageStoreStatistics getStoreStats(String kahaDestKey) {
2965        MessageStoreStatistics storeStats = null;
2966        try {
2967            MessageStore messageStore = storeCache.get(kahaDestKey);
2968            if (messageStore != null) {
2969                storeStats = messageStore.getMessageStoreStatistics();
2970            }
2971        } catch (Exception e1) {
2972             LOG.error("Getting size counter of destination failed", e1);
2973        }
2974
2975        return storeStats;
2976    }
2977
2978    protected MessageStoreSubscriptionStatistics getSubStats(String kahaDestKey) {
2979        MessageStoreSubscriptionStatistics subStats = null;
2980        try {
2981            MessageStore messageStore = storeCache.get(kahaDestKey);
2982            if (messageStore instanceof TopicMessageStore) {
2983                subStats = ((TopicMessageStore)messageStore).getMessageStoreSubStatistics();
2984            }
2985        } catch (Exception e1) {
2986             LOG.error("Getting size counter of destination failed", e1);
2987        }
2988
2989        return subStats;
2990    }
2991
2992    /**
2993     * Determine whether this Destination matches the DestinationType
2994     *
2995     * @param destination
2996     * @param type
2997     * @return
2998     */
2999    protected boolean matchType(Destination destination,
3000            KahaDestination.DestinationType type) {
3001        if (destination instanceof Topic
3002                && type.equals(KahaDestination.DestinationType.TOPIC)) {
3003            return true;
3004        } else if (destination instanceof Queue
3005                && type.equals(KahaDestination.DestinationType.QUEUE)) {
3006            return true;
3007        }
3008        return false;
3009    }
3010
    /**
     * Marshaller for {@link Location} keys that also persists the location's
     * size. Note the read/write asymmetry: the size is always written, but only
     * read back when the store metadata is at version 6 or later — older
     * stores did not persist it, and their entries are rewritten in the current
     * format on upgrade (see the version < 6 handling in loadStoredDestination).
     */
    class LocationSizeMarshaller implements Marshaller<Location> {

        public LocationSizeMarshaller() {

        }

        @Override
        public Location readPayload(DataInput dataIn) throws IOException {
            Location rc = new Location();
            rc.setDataFileId(dataIn.readInt());
            rc.setOffset(dataIn.readInt());
            // size was introduced with store version 6; older entries lack it
            if (metadata.version >= 6) {
                rc.setSize(dataIn.readInt());
            }
            return rc;
        }

        @Override
        public void writePayload(Location object, DataOutput dataOut)
                throws IOException {
            dataOut.writeInt(object.getDataFileId());
            dataOut.writeInt(object.getOffset());
            dataOut.writeInt(object.getSize());
        }

        @Override
        public int getFixedSize() {
            // dataFileId + offset + size, 4 bytes each (current format)
            return 12;
        }

        @Override
        public Location deepCopy(Location source) {
            return new Location(source);
        }

        @Override
        public boolean isDeepCopySupported() {
            return true;
        }
    }
3051
3052    private void addAckLocation(Transaction tx, StoredDestination sd, Long messageSequence, String subscriptionKey) throws IOException {
3053        SequenceSet sequences = sd.ackPositions.get(tx, subscriptionKey);
3054        if (sequences == null) {
3055            sequences = new SequenceSet();
3056            sequences.add(messageSequence);
3057            sd.ackPositions.add(tx, subscriptionKey, sequences);
3058        } else {
3059            sequences.add(messageSequence);
3060            sd.ackPositions.put(tx, subscriptionKey, sequences);
3061        }
3062    }
3063
3064    // new sub is interested in potentially all existing messages
3065    private void addAckLocationForRetroactiveSub(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
3066        SequenceSet allOutstanding = new SequenceSet();
3067        Iterator<Map.Entry<String, SequenceSet>> iterator = sd.ackPositions.iterator(tx);
3068        while (iterator.hasNext()) {
3069            SequenceSet set = iterator.next().getValue();
3070            for (Long entry : set) {
3071                allOutstanding.add(entry);
3072            }
3073        }
3074        sd.ackPositions.put(tx, subscriptionKey, allOutstanding);
3075    }
3076
3077    // on a new message add, all existing subs are interested in this message
3078    private void addAckLocationForNewMessage(Transaction tx, KahaDestination kahaDest,
3079            StoredDestination sd, Long messageSequence) throws IOException {
3080        for(String subscriptionKey : sd.subscriptionCache) {
3081            SequenceSet sequences = sd.ackPositions.get(tx, subscriptionKey);
3082            if (sequences == null) {
3083                sequences = new SequenceSet();
3084                sequences.add(new Sequence(messageSequence, messageSequence + 1));
3085                sd.ackPositions.add(tx, subscriptionKey, sequences);
3086            } else {
3087                sequences.add(new Sequence(messageSequence, messageSequence + 1));
3088                sd.ackPositions.put(tx, subscriptionKey, sequences);
3089            }
3090
3091            MessageKeys key = sd.orderIndex.get(tx, messageSequence);
3092            incrementAndAddSizeToStoreStat(kahaDest, subscriptionKey, key.location.getSize());
3093        }
3094    }
3095
    /**
     * Drops a subscription's outstanding-ack entry and garbage collects every
     * message that only this subscription was still referencing: such messages
     * are removed from the location, message-id and order indexes, and the
     * destination statistics are decremented accordingly.
     *
     * @param command         the subscription command carrying the destination
     * @param tx              active index transaction
     * @param sd              the stored destination being updated
     * @param subscriptionKey the subscription being removed
     */
    private void removeAckLocationsForSub(KahaSubscriptionCommand command,
            Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
        if (!sd.ackPositions.isEmpty(tx)) {
            SequenceSet sequences = sd.ackPositions.remove(tx, subscriptionKey);
            if (sequences == null || sequences.isEmpty()) {
                return;
            }

            // sequences no other cached subscription still holds
            ArrayList<Long> unreferenced = new ArrayList<>();

            for(Long sequenceId : sequences) {
                if(!isSequenceReferenced(tx, sd, sequenceId)) {
                    unreferenced.add(sequenceId);
                }
            }

            for(Long sequenceId : unreferenced) {
                // Find all the entries that need to get deleted.
                ArrayList<Entry<Long, MessageKeys>> deletes = new ArrayList<>();
                sd.orderIndex.getDeleteList(tx, deletes, sequenceId);

                // Do the actual deletes.
                for (Entry<Long, MessageKeys> entry : deletes) {
                    sd.locationIndex.remove(tx, entry.getValue().location);
                    sd.messageIdIndex.remove(tx, entry.getValue().messageId);
                    sd.orderIndex.remove(tx, entry.getKey());
                    decrementAndSubSizeToStoreStat(tx, command.getDestination(), entry.getValue().location.getSize());
                }
            }
        }
    }
3127
3128    private boolean isSequenceReferenced(final Transaction tx, final StoredDestination sd, final Long sequenceId) throws IOException {
3129        for(String subscriptionKey : sd.subscriptionCache) {
3130            SequenceSet sequence = sd.ackPositions.get(tx, subscriptionKey);
3131            if (sequence != null && sequence.contains(sequenceId)) {
3132                return true;
3133            }
3134        }
3135        return false;
3136    }
3137
3138    /**
3139     * @param tx
3140     * @param sd
3141     * @param subscriptionKey
3142     * @param messageSequence
3143     * @throws IOException
3144     */
3145    private void removeAckLocation(KahaRemoveMessageCommand command,
3146            Transaction tx, StoredDestination sd, String subscriptionKey,
3147            Long messageSequence) throws IOException {
3148        // Remove the sub from the previous location set..
3149        if (messageSequence != null) {
3150            SequenceSet range = sd.ackPositions.get(tx, subscriptionKey);
3151            if (range != null && !range.isEmpty()) {
3152                range.remove(messageSequence);
3153                if (!range.isEmpty()) {
3154                    sd.ackPositions.put(tx, subscriptionKey, range);
3155                } else {
3156                    sd.ackPositions.remove(tx, subscriptionKey);
3157                }
3158
3159                MessageKeys key = sd.orderIndex.get(tx, messageSequence);
3160                decrementAndSubSizeToStoreStat(command.getDestination(), subscriptionKey,
3161                        key.location.getSize());
3162
3163                // Check if the message is reference by any other subscription.
3164                if (isSequenceReferenced(tx, sd, messageSequence)) {
3165                    return;
3166                }
3167                // Find all the entries that need to get deleted.
3168                ArrayList<Entry<Long, MessageKeys>> deletes = new ArrayList<>();
3169                sd.orderIndex.getDeleteList(tx, deletes, messageSequence);
3170
3171                // Do the actual deletes.
3172                for (Entry<Long, MessageKeys> entry : deletes) {
3173                    sd.locationIndex.remove(tx, entry.getValue().location);
3174                    sd.messageIdIndex.remove(tx, entry.getValue().messageId);
3175                    sd.orderIndex.remove(tx, entry.getKey());
3176                    decrementAndSubSizeToStoreStat(tx, command.getDestination(), entry.getValue().location.getSize());
3177                }
3178            }
3179        }
3180    }
3181
    /**
     * Returns the last ack recorded for the given subscription, or null when
     * the subscription has no ack entry.
     */
    public LastAck getLastAck(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
        return sd.subscriptionAcks.get(tx, subscriptionKey);
    }
3185
3186    protected SequenceSet getSequenceSet(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
3187        if (sd.ackPositions != null) {
3188            final SequenceSet messageSequences = sd.ackPositions.get(tx, subscriptionKey);
3189            return messageSequences;
3190        }
3191
3192        return null;
3193    }
3194
3195    protected long getStoredMessageCount(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
3196        if (sd.ackPositions != null) {
3197            SequenceSet messageSequences = sd.ackPositions.get(tx, subscriptionKey);
3198            if (messageSequences != null) {
3199                long result = messageSequences.rangeSize();
3200                // if there's anything in the range the last value is always the nextMessage marker, so remove 1.
3201                return result > 0 ? result - 1 : 0;
3202            }
3203        }
3204
3205        return 0;
3206    }
3207
3208    /**
3209     * Recovers durable subscription pending message size with only 1 pass over the order index on recovery
3210     * instead of iterating over the index once per subscription
3211     *
3212     * @param tx
3213     * @param sd
3214     * @param subscriptionKeys
3215     * @return
3216     * @throws IOException
3217     */
3218    protected Map<String, AtomicLong> getStoredMessageSize(Transaction tx, StoredDestination sd, List<String> subscriptionKeys) throws IOException {
3219
3220        final Map<String, AtomicLong> subPendingMessageSizes = new HashMap<>();
3221        final Map<String, SequenceSet> messageSequencesMap = new HashMap<>();
3222
3223        if (sd.ackPositions != null) {
3224            Long recoveryPosition = null;
3225            //Go through each subscription and find matching ackPositions and their first
3226            //position to find the initial recovery position which is the first message across all subs
3227            //that needs to still be acked
3228            for (String subscriptionKey : subscriptionKeys) {
3229                subPendingMessageSizes.put(subscriptionKey, new AtomicLong());
3230                final SequenceSet messageSequences = sd.ackPositions.get(tx, subscriptionKey);
3231                if (messageSequences != null && !messageSequences.isEmpty()) {
3232                    final long head = messageSequences.getHead().getFirst();
3233                    recoveryPosition = recoveryPosition != null ? Math.min(recoveryPosition, head) : head;
3234                    //cache the SequenceSet to speed up recovery of metrics below and avoid a second index hit
3235                    messageSequencesMap.put(subscriptionKey, messageSequences);
3236                }
3237            }
3238            recoveryPosition = recoveryPosition != null ? recoveryPosition : 0;
3239
3240            final Iterator<Entry<Long, MessageKeys>> iterator = sd.orderIndex.iterator(tx,
3241                    new MessageOrderCursor(recoveryPosition));
3242
3243            //iterate through all messages starting at the recovery position to recover metrics
3244            while (iterator.hasNext()) {
3245                final Entry<Long, MessageKeys> messageEntry = iterator.next();
3246
3247                //For each message in the index check if each subscription needs to ack the message still
3248                //if the ackPositions SequenceSet contains the message then it has not been acked and should be
3249                //added to the pending metrics for that subscription
3250                for (Entry<String, SequenceSet> seqEntry : messageSequencesMap.entrySet()) {
3251                    final String subscriptionKey = seqEntry.getKey();
3252                    final SequenceSet messageSequences = messageSequencesMap.get(subscriptionKey);
3253                    if (messageSequences.contains(messageEntry.getKey())) {
3254                        subPendingMessageSizes.get(subscriptionKey).addAndGet(messageEntry.getValue().location.getSize());
3255                    }
3256                }
3257            }
3258        }
3259
3260        return subPendingMessageSizes;
3261    }
3262
    /**
     * Totals the stored size of every message still pending for one
     * subscription by walking the order index from the subscription's first
     * unacked sequence.
     *
     * @param tx              active index transaction
     * @param sd              the stored destination (topic)
     * @param subscriptionKey the subscription whose pending size is computed
     * @return total pending message size in bytes; 0 when nothing is pending
     * @throws IOException on index access failure
     */
    protected long getStoredMessageSize(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
        long locationSize = 0;

        if (sd.ackPositions != null) {
            //grab the messages attached to this subscription
            SequenceSet messageSequences = sd.ackPositions.get(tx, subscriptionKey);

            if (messageSequences != null && !messageSequences.isEmpty()) {
                final Sequence head = messageSequences.getHead();

                //get an iterator over the order index starting at the first unacked message
                //and go over each message to add up the size
                Iterator<Entry<Long, MessageKeys>> iterator = sd.orderIndex.iterator(tx,
                        new MessageOrderCursor(head.getFirst()));

                // a single Sequence means one contiguous unacked range, so
                // every message from the head onward belongs to this sub
                final boolean contiguousRange = messageSequences.size() == 1;
                while (iterator.hasNext()) {
                    Entry<Long, MessageKeys> entry = iterator.next();
                    //Verify sequence contains the key
                    //if contiguous we just add all starting with the first but if not
                    //we need to check if the id is part of the range - could happen if individual ack mode was used
                    if (contiguousRange || messageSequences.contains(entry.getKey())) {
                        locationSize += entry.getValue().location.getSize();
                    }
                }
            }
        }

        return locationSize;
    }
3293
    /**
     * Builds the cache/index key for a destination: the numeric destination
     * type and the destination name joined by a colon.
     */
    protected String key(KahaDestination destination) {
        return destination.getType().getNumber() + ":" + destination.getName();
    }
3297
3298    // /////////////////////////////////////////////////////////////////
3299    // Transaction related implementation methods.
3300    // /////////////////////////////////////////////////////////////////
3301    @SuppressWarnings("rawtypes")
3302    private final LinkedHashMap<TransactionId, List<Operation>> inflightTransactions = new LinkedHashMap<>();
3303    @SuppressWarnings("rawtypes")
3304    protected final LinkedHashMap<TransactionId, List<Operation>> preparedTransactions = new LinkedHashMap<>();
3305
3306    @SuppressWarnings("rawtypes")
3307    private List<Operation> getInflightTx(KahaTransactionInfo info) {
3308        TransactionId key = TransactionIdConversion.convert(info);
3309        List<Operation> tx;
3310        synchronized (inflightTransactions) {
3311            tx = inflightTransactions.get(key);
3312            if (tx == null) {
3313                tx = Collections.synchronizedList(new ArrayList<Operation>());
3314                inflightTransactions.put(key, tx);
3315            }
3316        }
3317        return tx;
3318    }
3319
    /**
     * Converts a KahaTransactionInfo to the corresponding broker TransactionId.
     */
    @SuppressWarnings("unused")
    private TransactionId key(KahaTransactionInfo transactionInfo) {
        return TransactionIdConversion.convert(transactionInfo);
    }
3324
    /**
     * A journaled command paired with the journal location it was written to,
     * buffered as part of a transaction; subclasses apply the command to the
     * index via {@link #execute(Transaction)}.
     *
     * @param <T> the concrete journal command type
     */
    abstract class Operation <T extends JournalCommand<T>> {
        final T command;
        final Location location;

        public Operation(T command, Location location) {
            this.command = command;
            this.location = location;
        }

        public Location getLocation() {
            return location;
        }

        public T getCommand() {
            return command;
        }

        // Applies the buffered command to the index under the given transaction.
        abstract public void execute(Transaction tx) throws IOException;
    }
3344
3345    class AddOperation extends Operation<KahaAddMessageCommand> {
3346        final IndexAware runWithIndexLock;
3347        public AddOperation(KahaAddMessageCommand command, Location location, IndexAware runWithIndexLock) {
3348            super(command, location);
3349            this.runWithIndexLock = runWithIndexLock;
3350        }
3351
3352        @Override
3353        public void execute(Transaction tx) throws IOException {
3354            long seq = updateIndex(tx, command, location);
3355            if (runWithIndexLock != null) {
3356                runWithIndexLock.sequenceAssignedWithIndexLocked(seq);
3357            }
3358        }
3359    }
3360
    /**
     * Transactional message-remove operation; on commit it replays the remove
     * command against the index at its original journal location.
     */
    class RemoveOperation extends Operation<KahaRemoveMessageCommand> {

        public RemoveOperation(KahaRemoveMessageCommand command, Location location) {
            super(command, location);
        }

        @Override
        public void execute(Transaction tx) throws IOException {
            updateIndex(tx, command, location);
        }
    }
3372
3373    // /////////////////////////////////////////////////////////////////
3374    // Initialization related implementation methods.
3375    // /////////////////////////////////////////////////////////////////
3376
3377    private PageFile createPageFile() throws IOException {
3378        if (indexDirectory == null) {
3379            indexDirectory = directory;
3380        }
3381        IOHelper.mkdirs(indexDirectory);
3382        PageFile index = new PageFile(indexDirectory, "db");
3383        index.setEnableWriteThread(isEnableIndexWriteAsync());
3384        index.setWriteBatchSize(getIndexWriteBatchSize());
3385        index.setPageCacheSize(indexCacheSize);
3386        index.setUseLFRUEviction(isUseIndexLFRUEviction());
3387        index.setLFUEvictionFactor(getIndexLFUEvictionFactor());
3388        index.setEnableDiskSyncs(isEnableIndexDiskSyncs());
3389        index.setEnableRecoveryFile(isEnableIndexRecoveryFile());
3390        index.setEnablePageCaching(isEnableIndexPageCaching());
3391        return index;
3392    }
3393
3394    protected Journal createJournal() throws IOException {
3395        Journal manager = new Journal();
3396        manager.setDirectory(directory);
3397        manager.setMaxFileLength(getJournalMaxFileLength());
3398        manager.setCheckForCorruptionOnStartup(checkForCorruptJournalFiles);
3399        manager.setChecksum(checksumJournalFiles || checkForCorruptJournalFiles);
3400        manager.setWriteBatchSize(getJournalMaxWriteBatchSize());
3401        manager.setArchiveDataLogs(isArchiveDataLogs());
3402        manager.setSizeAccumulator(journalSize);
3403        manager.setEnableAsyncDiskSync(isEnableJournalDiskSyncs());
3404        manager.setPreallocationScope(Journal.PreallocationScope.valueOf(preallocationScope.trim().toUpperCase()));
3405        manager.setPreallocationStrategy(
3406                Journal.PreallocationStrategy.valueOf(preallocationStrategy.trim().toUpperCase()));
3407        manager.setJournalDiskSyncStrategy(journalDiskSyncStrategy);
3408        if (getDirectoryArchive() != null) {
3409            IOHelper.mkdirs(getDirectoryArchive());
3410            manager.setDirectoryArchive(getDirectoryArchive());
3411        }
3412        return manager;
3413    }
3414
3415    private Metadata createMetadata() {
3416        Metadata md = new Metadata();
3417        md.producerSequenceIdTracker.setAuditDepth(getFailoverProducersAuditDepth());
3418        md.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(getMaxFailoverProducersToTrack());
3419        return md;
3420    }
3421
    // Hook for subclasses to apply additional configuration to the metadata instance.
    protected abstract void configureMetadata();
3423
    /** @return max bytes batched into a single journal write before forcing the write. */
    public int getJournalMaxWriteBatchSize() {
        return journalMaxWriteBatchSize;
    }

    public void setJournalMaxWriteBatchSize(int journalMaxWriteBatchSize) {
        this.journalMaxWriteBatchSize = journalMaxWriteBatchSize;
    }

    /** @return the directory holding the journal data files. */
    public File getDirectory() {
        return directory;
    }

    public void setDirectory(File directory) {
        this.directory = directory;
    }

    /** @return true if the store should be wiped clean on start. */
    public boolean isDeleteAllMessages() {
        return deleteAllMessages;
    }

    public void setDeleteAllMessages(boolean deleteAllMessages) {
        this.deleteAllMessages = deleteAllMessages;
    }

    // NOTE(review): the backing field is (historically) named after the setter
    // ("setIndexWriteBatchSize"); renaming it would touch lines outside this block.
    public void setIndexWriteBatchSize(int setIndexWriteBatchSize) {
        this.setIndexWriteBatchSize = setIndexWriteBatchSize;
    }

    /** @return number of index writes batched before the page file flushes. */
    public int getIndexWriteBatchSize() {
        return setIndexWriteBatchSize;
    }

    public void setEnableIndexWriteAsync(boolean enableIndexWriteAsync) {
        this.enableIndexWriteAsync = enableIndexWriteAsync;
    }

    boolean isEnableIndexWriteAsync() {
        return enableIndexWriteAsync;
    }
3463
3464    /**
3465     * @deprecated use {@link #getJournalDiskSyncStrategyEnum} or {@link #getJournalDiskSyncStrategy} instead
3466     * @return
3467     */
3468    @Deprecated
3469    public boolean isEnableJournalDiskSyncs() {
3470        return journalDiskSyncStrategy == JournalDiskSyncStrategy.ALWAYS;
3471    }
3472
3473    /**
3474     * @deprecated use {@link #setEnableJournalDiskSyncs} instead
3475     * @param syncWrites
3476     */
3477    @Deprecated
3478    public void setEnableJournalDiskSyncs(boolean syncWrites) {
3479        if (syncWrites) {
3480            journalDiskSyncStrategy = JournalDiskSyncStrategy.ALWAYS;
3481        } else {
3482            journalDiskSyncStrategy = JournalDiskSyncStrategy.NEVER;
3483        }
3484    }
3485
    /** @return the configured journal disk-sync strategy as its enum value. */
    public JournalDiskSyncStrategy getJournalDiskSyncStrategyEnum() {
        return journalDiskSyncStrategy;
    }

    /** @return the configured journal disk-sync strategy as its enum constant name. */
    public String getJournalDiskSyncStrategy() {
        return journalDiskSyncStrategy.name();
    }
3493
3494    public void setJournalDiskSyncStrategy(String journalDiskSyncStrategy) {
3495        this.journalDiskSyncStrategy = JournalDiskSyncStrategy.valueOf(journalDiskSyncStrategy.trim().toUpperCase());
3496    }
3497
    /** @return interval (ms) between periodic journal disk syncs (PERIODIC strategy). */
    public long getJournalDiskSyncInterval() {
        return journalDiskSyncInterval;
    }

    public void setJournalDiskSyncInterval(long journalDiskSyncInterval) {
        this.journalDiskSyncInterval = journalDiskSyncInterval;
    }

    /** @return interval (ms) between index checkpoints. */
    public long getCheckpointInterval() {
        return checkpointInterval;
    }

    public void setCheckpointInterval(long checkpointInterval) {
        this.checkpointInterval = checkpointInterval;
    }

    /** @return interval (ms) between journal-file garbage-collection passes. */
    public long getCleanupInterval() {
        return cleanupInterval;
    }

    public void setCleanupInterval(long cleanupInterval) {
        this.cleanupInterval = cleanupInterval;
    }

    /** @return true if a cleanup pass should also run when the store stops. */
    public boolean getCleanupOnStop() {
        return cleanupOnStop;
    }

    public void setCleanupOnStop(boolean cleanupOnStop) {
        this.cleanupOnStop = cleanupOnStop;
    }

    public void setJournalMaxFileLength(int journalMaxFileLength) {
        this.journalMaxFileLength = journalMaxFileLength;
    }

    /** @return max size (bytes) of an individual journal data file before rolling. */
    public int getJournalMaxFileLength() {
        return journalMaxFileLength;
    }

    // The failover-producer audit settings delegate straight to the metadata's
    // producer sequence-id tracker rather than being stored on this class.
    public void setMaxFailoverProducersToTrack(int maxFailoverProducersToTrack) {
        this.metadata.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(maxFailoverProducersToTrack);
    }

    public int getMaxFailoverProducersToTrack() {
        return this.metadata.producerSequenceIdTracker.getMaximumNumberOfProducersToTrack();
    }

    public void setFailoverProducersAuditDepth(int failoverProducersAuditDepth) {
        this.metadata.producerSequenceIdTracker.setAuditDepth(failoverProducersAuditDepth);
    }

    public int getFailoverProducersAuditDepth() {
        return this.metadata.producerSequenceIdTracker.getAuditDepth();
    }
3553
    /**
     * Returns the index PageFile, creating it on first access.
     * NOTE(review): the lazy init is not synchronized here — presumably only
     * reached under the index lock or during single-threaded start-up; confirm
     * with callers before relying on concurrent access.
     */
    public PageFile getPageFile() throws IOException {
        if (pageFile == null) {
            pageFile = createPageFile();
        }
        return pageFile;
    }

    /**
     * Returns the journal, creating it on first access.
     * NOTE(review): same unsynchronized lazy init caveat as {@link #getPageFile()}.
     */
    public Journal getJournal() throws IOException {
        if (journal == null) {
            journal = createJournal();
        }
        return journal;
    }

    /** @return the in-memory metadata root for the store index. */
    protected Metadata getMetadata() {
        return metadata;
    }
3571
    /** @return true if start-up should fail when the store lock cannot be obtained. */
    public boolean isFailIfDatabaseIsLocked() {
        return failIfDatabaseIsLocked;
    }

    public void setFailIfDatabaseIsLocked(boolean failIfDatabaseIsLocked) {
        this.failIfDatabaseIsLocked = failIfDatabaseIsLocked;
    }

    /** @return true if recovery should proceed despite missing journal data files. */
    public boolean isIgnoreMissingJournalfiles() {
        return ignoreMissingJournalfiles;
    }

    public void setIgnoreMissingJournalfiles(boolean ignoreMissingJournalfiles) {
        this.ignoreMissingJournalfiles = ignoreMissingJournalfiles;
    }

    /** @return number of index pages held in the page-file cache. */
    public int getIndexCacheSize() {
        return indexCacheSize;
    }

    public void setIndexCacheSize(int indexCacheSize) {
        this.indexCacheSize = indexCacheSize;
    }

    /** @return true if journal files are scanned for corruption on start-up. */
    public boolean isCheckForCorruptJournalFiles() {
        return checkForCorruptJournalFiles;
    }

    public void setCheckForCorruptJournalFiles(boolean checkForCorruptJournalFiles) {
        this.checkForCorruptJournalFiles = checkForCorruptJournalFiles;
    }
3603
    /** @return the policy for recovered-but-unresolved XA transactions, as its enum value. */
    public PurgeRecoveredXATransactionStrategy getPurgeRecoveredXATransactionStrategyEnum() {
        return purgeRecoveredXATransactionStrategy;
    }

    /** @return the recovered-XA purge policy as its enum constant name. */
    public String getPurgeRecoveredXATransactionStrategy() {
        return purgeRecoveredXATransactionStrategy.name();
    }
3611
3612    public void setPurgeRecoveredXATransactionStrategy(String purgeRecoveredXATransactionStrategy) {
3613        this.purgeRecoveredXATransactionStrategy = PurgeRecoveredXATransactionStrategy.valueOf(
3614                purgeRecoveredXATransactionStrategy.trim().toUpperCase());
3615    }
3616
    /** @return true if journal records carry checksums for corruption detection. */
    public boolean isChecksumJournalFiles() {
        return checksumJournalFiles;
    }

    public void setChecksumJournalFiles(boolean checksumJournalFiles) {
        this.checksumJournalFiles = checksumJournalFiles;
    }

    @Override
    public void setBrokerService(BrokerService brokerService) {
        this.brokerService = brokerService;
    }

    /**
     * @return the archiveDataLogs
     */
    public boolean isArchiveDataLogs() {
        return this.archiveDataLogs;
    }

    /**
     * @param archiveDataLogs the archiveDataLogs to set
     */
    public void setArchiveDataLogs(boolean archiveDataLogs) {
        this.archiveDataLogs = archiveDataLogs;
    }

    /**
     * @return the directoryArchive
     */
    public File getDirectoryArchive() {
        return this.directoryArchive;
    }

    /**
     * @param directoryArchive the directoryArchive to set
     */
    public void setDirectoryArchive(File directoryArchive) {
        this.directoryArchive = directoryArchive;
    }

    /** @return true if a corrupted index should be archived rather than just discarded. */
    public boolean isArchiveCorruptedIndex() {
        return archiveCorruptedIndex;
    }

    public void setArchiveCorruptedIndex(boolean archiveCorruptedIndex) {
        this.archiveCorruptedIndex = archiveCorruptedIndex;
    }

    /** @return LFU eviction factor used when LFRU eviction is enabled on the page cache. */
    public float getIndexLFUEvictionFactor() {
        return indexLFUEvictionFactor;
    }

    public void setIndexLFUEvictionFactor(float indexLFUEvictionFactor) {
        this.indexLFUEvictionFactor = indexLFUEvictionFactor;
    }

    /** @return true if the index page cache uses LFRU eviction instead of the default. */
    public boolean isUseIndexLFRUEviction() {
        return useIndexLFRUEviction;
    }

    public void setUseIndexLFRUEviction(boolean useIndexLFRUEviction) {
        this.useIndexLFRUEviction = useIndexLFRUEviction;
    }

    // The following three toggles are forwarded into the PageFile in createPageFile().
    public void setEnableIndexDiskSyncs(boolean enableIndexDiskSyncs) {
        this.enableIndexDiskSyncs = enableIndexDiskSyncs;
    }

    public void setEnableIndexRecoveryFile(boolean enableIndexRecoveryFile) {
        this.enableIndexRecoveryFile = enableIndexRecoveryFile;
    }

    public void setEnableIndexPageCaching(boolean enableIndexPageCaching) {
        this.enableIndexPageCaching = enableIndexPageCaching;
    }

    public boolean isEnableIndexDiskSyncs() {
        return enableIndexDiskSyncs;
    }

    public boolean isEnableIndexRecoveryFile() {
        return enableIndexRecoveryFile;
    }

    public boolean isEnableIndexPageCaching() {
        return enableIndexPageCaching;
    }

    /** @return the statistics holder for this persistence adapter. */
    public PersistenceAdapterStatistics getPersistenceAdapterStatistics() {
        return this.persistenceAdapterStatistics;
    }
3709
3710    // /////////////////////////////////////////////////////////////////
3711    // Internal conversion methods.
3712    // /////////////////////////////////////////////////////////////////
3713
3714    class MessageOrderCursor{
3715        long defaultCursorPosition;
3716        long lowPriorityCursorPosition;
3717        long highPriorityCursorPosition;
3718        MessageOrderCursor(){
3719        }
3720
3721        MessageOrderCursor(long position){
3722            this.defaultCursorPosition=position;
3723            this.lowPriorityCursorPosition=position;
3724            this.highPriorityCursorPosition=position;
3725        }
3726
3727        MessageOrderCursor(MessageOrderCursor other){
3728            this.defaultCursorPosition=other.defaultCursorPosition;
3729            this.lowPriorityCursorPosition=other.lowPriorityCursorPosition;
3730            this.highPriorityCursorPosition=other.highPriorityCursorPosition;
3731        }
3732
3733        MessageOrderCursor copy() {
3734            return new MessageOrderCursor(this);
3735        }
3736
3737        void reset() {
3738            this.defaultCursorPosition=0;
3739            this.highPriorityCursorPosition=0;
3740            this.lowPriorityCursorPosition=0;
3741        }
3742
3743        void increment() {
3744            if (defaultCursorPosition!=0) {
3745                defaultCursorPosition++;
3746            }
3747            if (highPriorityCursorPosition!=0) {
3748                highPriorityCursorPosition++;
3749            }
3750            if (lowPriorityCursorPosition!=0) {
3751                lowPriorityCursorPosition++;
3752            }
3753        }
3754
3755        @Override
3756        public String toString() {
3757           return "MessageOrderCursor:[def:" + defaultCursorPosition
3758                   + ", low:" + lowPriorityCursorPosition
3759                   + ", high:" +  highPriorityCursorPosition + "]";
3760        }
3761
3762        public void sync(MessageOrderCursor other) {
3763            this.defaultCursorPosition=other.defaultCursorPosition;
3764            this.lowPriorityCursorPosition=other.lowPriorityCursorPosition;
3765            this.highPriorityCursorPosition=other.highPriorityCursorPosition;
3766        }
3767    }
3768
    /**
     * Priority-aware ordered index of messages for one destination. Messages are
     * partitioned into three BTree indexes (high / default / low priority band)
     * keyed by sequence id, with a {@link MessageOrderCursor} tracking the next
     * dispatch position per band. Iteration order is high, then default, then low.
     * The low/high indexes only exist for store version >= 2 (see allocate()),
     * so most accessors null-guard them.
     */
    class MessageOrderIndex {
        // Priority band markers, matching JMS priority extremes and default.
        static final byte HI = 9;
        static final byte LO = 0;
        static final byte DEF = 4;

        // Next sequence id to hand out for this destination.
        long nextMessageId;
        BTreeIndex<Long, MessageKeys> defaultPriorityIndex;
        BTreeIndex<Long, MessageKeys> lowPriorityIndex;
        BTreeIndex<Long, MessageKeys> highPriorityIndex;
        final MessageOrderCursor cursor = new MessageOrderCursor();
        // Last keys returned while iterating; folded back into the cursor by stoppedIterating().
        Long lastDefaultKey;
        Long lastHighKey;
        Long lastLowKey;
        // Band (HI/DEF/LO) of the most recent get() hit.
        byte lastGetPriority;
        // Sequence ids added but not yet index-visible; iteration stops before the minimum.
        final List<Long> pendingAdditions = new LinkedList<>();
        final MessageKeysMarshaller messageKeysMarshaller = new MessageKeysMarshaller();

        // Removes the key from whichever band contains it, checking default first.
        MessageKeys remove(Transaction tx, Long key) throws IOException {
            MessageKeys result = defaultPriorityIndex.remove(tx, key);
            if (result == null && highPriorityIndex!=null) {
                result = highPriorityIndex.remove(tx, key);
                if (result ==null && lowPriorityIndex!=null) {
                    result = lowPriorityIndex.remove(tx, key);
                }
            }
            return result;
        }

        // Loads all three band indexes, installing the key/value marshallers.
        void load(Transaction tx) throws IOException {
            defaultPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
            defaultPriorityIndex.setValueMarshaller(messageKeysMarshaller);
            defaultPriorityIndex.load(tx);
            lowPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
            lowPriorityIndex.setValueMarshaller(messageKeysMarshaller);
            lowPriorityIndex.load(tx);
            highPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
            highPriorityIndex.setValueMarshaller(messageKeysMarshaller);
            highPriorityIndex.load(tx);
        }

        // Allocates pages for the band indexes; priority bands only for version >= 2 stores.
        void allocate(Transaction tx) throws IOException {
            defaultPriorityIndex = new BTreeIndex<>(pageFile, tx.allocate());
            if (metadata.version >= 2) {
                lowPriorityIndex = new BTreeIndex<>(pageFile, tx.allocate());
                highPriorityIndex = new BTreeIndex<>(pageFile, tx.allocate());
            }
        }

        void configureLast(Transaction tx) throws IOException {
            // Figure out the next key using the last entry in the destination.
            TreeSet<Long> orderedSet = new TreeSet<>();

            addLast(orderedSet, highPriorityIndex, tx);
            addLast(orderedSet, defaultPriorityIndex, tx);
            addLast(orderedSet, lowPriorityIndex, tx);

            if (!orderedSet.isEmpty()) {
                nextMessageId = orderedSet.last() + 1;
            }
        }

        // Adds the given band's highest key to the set (no-op for absent bands/entries).
        private void addLast(TreeSet<Long> orderedSet, BTreeIndex<Long, MessageKeys> index, Transaction tx) throws IOException {
            if (index != null) {
                Entry<Long, MessageKeys> lastEntry = index.getLast(tx);
                if (lastEntry != null) {
                    orderedSet.add(lastEntry.getKey());
                }
            }
        }

        // Drops and recreates all band indexes, resetting cursor state.
        void clear(Transaction tx) throws IOException {
            this.remove(tx);
            this.resetCursorPosition();
            this.allocate(tx);
            this.load(tx);
            this.configureLast(tx);
        }

        // Clears, unloads and frees the pages of every existing band index.
        void remove(Transaction tx) throws IOException {
            defaultPriorityIndex.clear(tx);
            defaultPriorityIndex.unload(tx);
            tx.free(defaultPriorityIndex.getPageId());
            if (lowPriorityIndex != null) {
                lowPriorityIndex.clear(tx);
                lowPriorityIndex.unload(tx);

                tx.free(lowPriorityIndex.getPageId());
            }
            if (highPriorityIndex != null) {
                highPriorityIndex.clear(tx);
                highPriorityIndex.unload(tx);
                tx.free(highPriorityIndex.getPageId());
            }
        }

        void resetCursorPosition() {
            this.cursor.reset();
            lastDefaultKey = null;
            lastHighKey = null;
            lastLowKey = null;
        }

        // Positions every band just past the given (already acked) sequence.
        void setBatch(Transaction tx, Long sequence) throws IOException {
            if (sequence != null) {
                // NOTE(review): new Long(...) is a deprecated boxing constructor;
                // plain long arithmetic would do — left unchanged here.
                Long nextPosition = new Long(sequence.longValue() + 1);
                lastDefaultKey = sequence;
                cursor.defaultCursorPosition = nextPosition.longValue();
                lastHighKey = sequence;
                cursor.highPriorityCursorPosition = nextPosition.longValue();
                lastLowKey = sequence;
                cursor.lowPriorityCursorPosition = nextPosition.longValue();
            }
        }

        // Positions the cursor from a recovered last-ack; when the plain setBatch
        // left everything at 0, seed the bands implied by the ack's priority band
        // (acking a low-priority message implies all higher bands were consumed).
        void setBatch(Transaction tx, LastAck last) throws IOException {
            setBatch(tx, last.lastAckedSequence);
            if (cursor.defaultCursorPosition == 0
                    && cursor.highPriorityCursorPosition == 0
                    && cursor.lowPriorityCursorPosition == 0) {
                long next = last.lastAckedSequence + 1;
                switch (last.priority) {
                    case DEF:
                        cursor.defaultCursorPosition = next;
                        cursor.highPriorityCursorPosition = next;
                        break;
                    case HI:
                        cursor.highPriorityCursorPosition = next;
                        break;
                    case LO:
                        cursor.lowPriorityCursorPosition = next;
                        cursor.defaultCursorPosition = next;
                        cursor.highPriorityCursorPosition = next;
                        break;
                }
            }
        }

        // Folds the last-iterated keys back into the cursor and clears them.
        void stoppedIterating() {
            if (lastDefaultKey!=null) {
                cursor.defaultCursorPosition=lastDefaultKey.longValue()+1;
            }
            if (lastHighKey!=null) {
                cursor.highPriorityCursorPosition=lastHighKey.longValue()+1;
            }
            if (lastLowKey!=null) {
                cursor.lowPriorityCursorPosition=lastLowKey.longValue()+1;
            }
            lastDefaultKey = null;
            lastHighKey = null;
            lastLowKey = null;
        }

        // Collects the entry at sequenceId (from whichever band holds it) into deletes.
        void getDeleteList(Transaction tx, ArrayList<Entry<Long, MessageKeys>> deletes, Long sequenceId)
                throws IOException {
            if (defaultPriorityIndex.containsKey(tx, sequenceId)) {
                getDeleteList(tx, deletes, defaultPriorityIndex, sequenceId);
            } else if (highPriorityIndex != null && highPriorityIndex.containsKey(tx, sequenceId)) {
                getDeleteList(tx, deletes, highPriorityIndex, sequenceId);
            } else if (lowPriorityIndex != null && lowPriorityIndex.containsKey(tx, sequenceId)) {
                getDeleteList(tx, deletes, lowPriorityIndex, sequenceId);
            }
        }

        void getDeleteList(Transaction tx, ArrayList<Entry<Long, MessageKeys>> deletes,
                BTreeIndex<Long, MessageKeys> index, Long sequenceId) throws IOException {

            // NOTE(review): next() is called without hasNext(); callers guard via
            // containsKey first (see the overload above), so an entry should exist.
            Iterator<Entry<Long, MessageKeys>> iterator = index.iterator(tx, sequenceId, null);
            deletes.add(iterator.next());
        }

        // Hands out the next sequence id (post-increment).
        long getNextMessageId() {
            return nextMessageId++;
        }

        // Undoes the most recent getNextMessageId() (e.g. on a failed add).
        void revertNextMessageId() {
            nextMessageId--;
        }

        // Looks the key up across bands (default, then high, then low) and records
        // which band it was found in for lastGetPriority().
        // NOTE(review): unlike remove(), there is no null guard on the high/low
        // indexes here — this appears to assume a version >= 2 store; confirm.
        MessageKeys get(Transaction tx, Long key) throws IOException {
            MessageKeys result = defaultPriorityIndex.get(tx, key);
            if (result == null) {
                result = highPriorityIndex.get(tx, key);
                if (result == null) {
                    result = lowPriorityIndex.get(tx, key);
                    lastGetPriority = LO;
                } else {
                    lastGetPriority = HI;
                }
            } else {
                lastGetPriority = DEF;
            }
            return result;
        }

        // Routes the entry to the band index matching the JMS priority
        // (default = 4; above goes high, below goes low).
        MessageKeys put(Transaction tx, int priority, Long key, MessageKeys value) throws IOException {
            if (priority == javax.jms.Message.DEFAULT_PRIORITY) {
                return defaultPriorityIndex.put(tx, key, value);
            } else if (priority > javax.jms.Message.DEFAULT_PRIORITY) {
                return highPriorityIndex.put(tx, key, value);
            } else {
                return lowPriorityIndex.put(tx, key, value);
            }
        }

        // Iterates from the live cursor position.
        Iterator<Entry<Long, MessageKeys>> iterator(Transaction tx) throws IOException{
            return new MessageOrderIterator(tx,cursor,this);
        }

        // Iterates from an explicit cursor (e.g. a copy, for non-destructive browsing).
        Iterator<Entry<Long, MessageKeys>> iterator(Transaction tx, MessageOrderCursor m) throws IOException{
            return new MessageOrderIterator(tx,m,this);
        }

        public byte lastGetPriority() {
            return lastGetPriority;
        }

        // True if any positioned band's cursor has already passed this sequence.
        public boolean alreadyDispatched(Long sequence) {
            return (cursor.highPriorityCursorPosition > 0 && cursor.highPriorityCursorPosition >= sequence) ||
                    (cursor.defaultCursorPosition > 0 && cursor.defaultCursorPosition >= sequence) ||
                    (cursor.lowPriorityCursorPosition > 0 && cursor.lowPriorityCursorPosition >= sequence);
        }

        public void trackPendingAdd(Long seq) {
            synchronized (pendingAdditions) {
                pendingAdditions.add(seq);
            }
        }

        public void trackPendingAddComplete(Long seq) {
            synchronized (pendingAdditions) {
                pendingAdditions.remove(seq);
            }
        }

        // Lowest in-flight sequence id, or null when none; iteration stops before it
        // so a reader never sees a gap from a not-yet-indexed add.
        public Long minPendingAdd() {
            synchronized (pendingAdditions) {
                if (!pendingAdditions.isEmpty()) {
                    return pendingAdditions.get(0);
                } else {
                    return null;
                }
            }
        }

        /**
         * Iterates the three band indexes in strict priority order: all remaining
         * high entries, then default, then low. Tracks the last key returned per
         * band so stoppedIterating() can advance the cursor.
         */
        class MessageOrderIterator implements Iterator<Entry<Long, MessageKeys>>{
            Iterator<Entry<Long, MessageKeys>>currentIterator;
            final Iterator<Entry<Long, MessageKeys>>highIterator;
            final Iterator<Entry<Long, MessageKeys>>defaultIterator;
            final Iterator<Entry<Long, MessageKeys>>lowIterator;

            MessageOrderIterator(Transaction tx, MessageOrderCursor m, MessageOrderIndex messageOrderIndex) throws IOException {
                // Upper bound: never iterate past the lowest pending (uncommitted) add.
                Long pendingAddLimiter = messageOrderIndex.minPendingAdd();
                this.defaultIterator = defaultPriorityIndex.iterator(tx, m.defaultCursorPosition, pendingAddLimiter);
                if (highPriorityIndex != null) {
                    this.highIterator = highPriorityIndex.iterator(tx, m.highPriorityCursorPosition, pendingAddLimiter);
                } else {
                    this.highIterator = null;
                }
                if (lowPriorityIndex != null) {
                    this.lowIterator = lowPriorityIndex.iterator(tx, m.lowPriorityCursorPosition, pendingAddLimiter);
                } else {
                    this.lowIterator = null;
                }
            }

            @Override
            public boolean hasNext() {
                // First call: pick the highest-priority band with entries.
                // highIterator == null implies a version < 2 store with only the
                // default band (see allocate()).
                if (currentIterator == null) {
                    if (highIterator != null) {
                        if (highIterator.hasNext()) {
                            currentIterator = highIterator;
                            return currentIterator.hasNext();
                        }
                        if (defaultIterator.hasNext()) {
                            currentIterator = defaultIterator;
                            return currentIterator.hasNext();
                        }
                        if (lowIterator.hasNext()) {
                            currentIterator = lowIterator;
                            return currentIterator.hasNext();
                        }
                        return false;
                    } else {
                        currentIterator = defaultIterator;
                        return currentIterator.hasNext();
                    }
                }
                // Subsequent calls: stay on the current band until exhausted, then
                // fall through high -> default -> low.
                if (highIterator != null) {
                    if (currentIterator.hasNext()) {
                        return true;
                    }
                    if (currentIterator == highIterator) {
                        if (defaultIterator.hasNext()) {
                            currentIterator = defaultIterator;
                            return currentIterator.hasNext();
                        }
                        if (lowIterator.hasNext()) {
                            currentIterator = lowIterator;
                            return currentIterator.hasNext();
                        }
                        return false;
                    }

                    if (currentIterator == defaultIterator) {
                        if (lowIterator.hasNext()) {
                            currentIterator = lowIterator;
                            return currentIterator.hasNext();
                        }
                        return false;
                    }
                }
                return currentIterator.hasNext();
            }

            @Override
            public Entry<Long, MessageKeys> next() {
                Entry<Long, MessageKeys> result = currentIterator.next();
                if (result != null) {
                    // Remember the last key per band for stoppedIterating().
                    Long key = result.getKey();
                    if (highIterator != null) {
                        if (currentIterator == defaultIterator) {
                            lastDefaultKey = key;
                        } else if (currentIterator == highIterator) {
                            lastHighKey = key;
                        } else {
                            lastLowKey = key;
                        }
                    } else {
                        lastDefaultKey = key;
                    }
                }
                return result;
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException();
            }
        }
    }
4109
4110    private static class HashSetStringMarshaller extends VariableMarshaller<HashSet<String>> {
4111        final static HashSetStringMarshaller INSTANCE = new HashSetStringMarshaller();
4112
4113        @Override
4114        public void writePayload(HashSet<String> object, DataOutput dataOut) throws IOException {
4115            ByteArrayOutputStream baos = new ByteArrayOutputStream();
4116            ObjectOutputStream oout = new ObjectOutputStream(baos);
4117            oout.writeObject(object);
4118            oout.flush();
4119            oout.close();
4120            byte[] data = baos.toByteArray();
4121            dataOut.writeInt(data.length);
4122            dataOut.write(data);
4123        }
4124
4125        @Override
4126        @SuppressWarnings("unchecked")
4127        public HashSet<String> readPayload(DataInput dataIn) throws IOException {
4128            int dataLen = dataIn.readInt();
4129            byte[] data = new byte[dataLen];
4130            dataIn.readFully(data);
4131            ByteArrayInputStream bais = new ByteArrayInputStream(data);
4132            ObjectInputStream oin = new MessageDatabaseObjectInputStream(bais);
4133            try {
4134                return (HashSet<String>) oin.readObject();
4135            } catch (ClassNotFoundException cfe) {
4136                IOException ioe = new IOException("Failed to read HashSet<String>: " + cfe);
4137                ioe.initCause(cfe);
4138                throw ioe;
4139            }
4140        }
4141    }
4142
4143    public File getIndexDirectory() {
4144        return indexDirectory;
4145    }
4146
    /**
     * Sets the directory in which the store's index files are kept.
     *
     * @param indexDirectory the directory to use for index files.
     */
    public void setIndexDirectory(File indexDirectory) {
        this.indexDirectory = indexDirectory;
    }
4150
4151    interface IndexAware {
4152        public void sequenceAssignedWithIndexLocked(long index);
4153    }
4154
4155    public String getPreallocationScope() {
4156        return preallocationScope;
4157    }
4158
    /**
     * Sets the scope at which journal preallocation is applied.
     *
     * @param preallocationScope the preallocation scope name to use.
     */
    public void setPreallocationScope(String preallocationScope) {
        this.preallocationScope = preallocationScope;
    }
4162
4163    public String getPreallocationStrategy() {
4164        return preallocationStrategy;
4165    }
4166
    /**
     * Sets the strategy used to preallocate journal files.
     *
     * @param preallocationStrategy the preallocation strategy name to use.
     */
    public void setPreallocationStrategy(String preallocationStrategy) {
        this.preallocationStrategy = preallocationStrategy;
    }
4170
4171    public int getCompactAcksAfterNoGC() {
4172        return compactAcksAfterNoGC;
4173    }
4174
4175    /**
4176     * Sets the number of GC cycles where no journal logs were removed before an attempt to
4177     * move forward all the acks in the last log that contains them and is otherwise unreferenced.
4178     * <p>
4179     * A value of -1 will disable this feature.
4180     *
4181     * @param compactAcksAfterNoGC
4182     *      Number of empty GC cycles before we rewrite old ACKS.
4183     */
4184    public void setCompactAcksAfterNoGC(int compactAcksAfterNoGC) {
4185        this.compactAcksAfterNoGC = compactAcksAfterNoGC;
4186    }
4187
4188    /**
4189     * Returns whether Ack compaction will ignore that the store is still growing
4190     * and run more often.
4191     *
4192     * @return the compactAcksIgnoresStoreGrowth current value.
4193     */
4194    public boolean isCompactAcksIgnoresStoreGrowth() {
4195        return compactAcksIgnoresStoreGrowth;
4196    }
4197
4198    /**
4199     * Configure if Ack compaction will occur regardless of continued growth of the
4200     * journal logs meaning that the store has not run out of space yet.  Because the
4201     * compaction operation can be costly this value is defaulted to off and the Ack
4202     * compaction is only done when it seems that the store cannot grow and larger.
4203     *
4204     * @param compactAcksIgnoresStoreGrowth the compactAcksIgnoresStoreGrowth to set
4205     */
4206    public void setCompactAcksIgnoresStoreGrowth(boolean compactAcksIgnoresStoreGrowth) {
4207        this.compactAcksIgnoresStoreGrowth = compactAcksIgnoresStoreGrowth;
4208    }
4209
4210    /**
4211     * Returns whether Ack compaction is enabled
4212     *
4213     * @return enableAckCompaction
4214     */
4215    public boolean isEnableAckCompaction() {
4216        return enableAckCompaction;
4217    }
4218
4219    /**
4220     * Configure if the Ack compaction task should be enabled to run
4221     *
4222     * @param enableAckCompaction
4223     */
4224    public void setEnableAckCompaction(boolean enableAckCompaction) {
4225        this.enableAckCompaction = enableAckCompaction;
4226    }
4227
4228    /**
4229     * @return
4230     */
4231    public boolean isEnableSubscriptionStatistics() {
4232        return enableSubscriptionStatistics;
4233    }
4234
4235    /**
4236     * Enable caching statistics for each subscription to allow non-blocking
4237     * retrieval of metrics.  This could incur some overhead to compute if there are a lot
4238     * of subscriptions.
4239     *
4240     * @param enableSubscriptionStatistics
4241     */
4242    public void setEnableSubscriptionStatistics(boolean enableSubscriptionStatistics) {
4243        this.enableSubscriptionStatistics = enableSubscriptionStatistics;
4244    }
4245
4246    private static class MessageDatabaseObjectInputStream extends ObjectInputStream {
4247
4248        public MessageDatabaseObjectInputStream(InputStream is) throws IOException {
4249            super(is);
4250        }
4251
4252        @Override
4253        protected Class<?> resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException {
4254            if (!(desc.getName().startsWith("java.lang.")
4255                    || desc.getName().startsWith("com.thoughtworks.xstream")
4256                    || desc.getName().startsWith("java.util.")
4257                    || desc.getName().length() > 2 && desc.getName().substring(2).startsWith("java.util.") // Allow arrays
4258                    || desc.getName().startsWith("org.apache.activemq."))) {
4259                throw new InvalidClassException("Unauthorized deserialization attempt", desc.getName());
4260            }
4261            return super.resolveClass(desc);
4262        }
4263
4264    }
4265}