001/**
002 * Licensed to the Apache Software Foundation (ASF) under one or more
003 * contributor license agreements.  See the NOTICE file distributed with
004 * this work for additional information regarding copyright ownership.
005 * The ASF licenses this file to You under the Apache License, Version 2.0
006 * (the "License"); you may not use this file except in compliance with
007 * the License.  You may obtain a copy of the License at
008 *
009 *      http://www.apache.org/licenses/LICENSE-2.0
010 *
011 * Unless required by applicable law or agreed to in writing, software
012 * distributed under the License is distributed on an "AS IS" BASIS,
013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
014 * See the License for the specific language governing permissions and
015 * limitations under the License.
016 */
017package org.apache.activemq.store.kahadb;
018
019import static org.apache.activemq.store.kahadb.disk.journal.Location.NOT_SET;
020
021import java.io.ByteArrayInputStream;
022import java.io.ByteArrayOutputStream;
023import java.io.DataInput;
024import java.io.DataOutput;
025import java.io.EOFException;
026import java.io.File;
027import java.io.IOException;
028import java.io.InputStream;
029import java.io.InterruptedIOException;
030import java.io.ObjectInputStream;
031import java.io.ObjectOutputStream;
032import java.io.OutputStream;
033import java.util.ArrayList;
034import java.util.Arrays;
035import java.util.Collection;
036import java.util.Collections;
037import java.util.Date;
038import java.util.HashMap;
039import java.util.HashSet;
040import java.util.Iterator;
041import java.util.LinkedHashMap;
042import java.util.LinkedHashSet;
043import java.util.LinkedList;
044import java.util.List;
045import java.util.Map;
046import java.util.Map.Entry;
047import java.util.Set;
048import java.util.SortedSet;
049import java.util.TreeMap;
050import java.util.TreeSet;
051import java.util.concurrent.ConcurrentHashMap;
052import java.util.concurrent.ConcurrentMap;
053import java.util.concurrent.Executors;
054import java.util.concurrent.ScheduledExecutorService;
055import java.util.concurrent.ThreadFactory;
056import java.util.concurrent.TimeUnit;
057import java.util.concurrent.atomic.AtomicBoolean;
058import java.util.concurrent.atomic.AtomicLong;
059import java.util.concurrent.atomic.AtomicReference;
060import java.util.concurrent.locks.ReentrantReadWriteLock;
061
062import org.apache.activemq.ActiveMQMessageAuditNoSync;
063import org.apache.activemq.broker.BrokerService;
064import org.apache.activemq.broker.BrokerServiceAware;
065import org.apache.activemq.broker.region.Destination;
066import org.apache.activemq.broker.region.Queue;
067import org.apache.activemq.broker.region.Topic;
068import org.apache.activemq.command.MessageAck;
069import org.apache.activemq.command.TransactionId;
070import org.apache.activemq.openwire.OpenWireFormat;
071import org.apache.activemq.protobuf.Buffer;
072import org.apache.activemq.store.MessageStore;
073import org.apache.activemq.store.MessageStoreStatistics;
074import org.apache.activemq.store.MessageStoreSubscriptionStatistics;
075import org.apache.activemq.store.TopicMessageStore;
076import org.apache.activemq.store.kahadb.data.KahaAckMessageFileMapCommand;
077import org.apache.activemq.store.kahadb.data.KahaAddMessageCommand;
078import org.apache.activemq.store.kahadb.data.KahaCommitCommand;
079import org.apache.activemq.store.kahadb.data.KahaDestination;
080import org.apache.activemq.store.kahadb.data.KahaEntryType;
081import org.apache.activemq.store.kahadb.data.KahaPrepareCommand;
082import org.apache.activemq.store.kahadb.data.KahaProducerAuditCommand;
083import org.apache.activemq.store.kahadb.data.KahaRemoveDestinationCommand;
084import org.apache.activemq.store.kahadb.data.KahaRemoveMessageCommand;
085import org.apache.activemq.store.kahadb.data.KahaRewrittenDataFileCommand;
086import org.apache.activemq.store.kahadb.data.KahaRollbackCommand;
087import org.apache.activemq.store.kahadb.data.KahaSubscriptionCommand;
088import org.apache.activemq.store.kahadb.data.KahaTraceCommand;
089import org.apache.activemq.store.kahadb.data.KahaTransactionInfo;
090import org.apache.activemq.store.kahadb.data.KahaUpdateMessageCommand;
091import org.apache.activemq.store.kahadb.disk.index.BTreeIndex;
092import org.apache.activemq.store.kahadb.disk.index.BTreeVisitor;
093import org.apache.activemq.store.kahadb.disk.index.ListIndex;
094import org.apache.activemq.store.kahadb.disk.journal.DataFile;
095import org.apache.activemq.store.kahadb.disk.journal.Journal;
096import org.apache.activemq.store.kahadb.disk.journal.Journal.JournalDiskSyncStrategy;
097import org.apache.activemq.store.kahadb.disk.journal.Location;
098import org.apache.activemq.store.kahadb.disk.journal.TargetedDataFileAppender;
099import org.apache.activemq.store.kahadb.disk.page.Page;
100import org.apache.activemq.store.kahadb.disk.page.PageFile;
101import org.apache.activemq.store.kahadb.disk.page.Transaction;
102import org.apache.activemq.store.kahadb.disk.util.LocationMarshaller;
103import org.apache.activemq.store.kahadb.disk.util.LongMarshaller;
104import org.apache.activemq.store.kahadb.disk.util.Marshaller;
105import org.apache.activemq.store.kahadb.disk.util.Sequence;
106import org.apache.activemq.store.kahadb.disk.util.SequenceSet;
107import org.apache.activemq.store.kahadb.disk.util.StringMarshaller;
108import org.apache.activemq.store.kahadb.disk.util.VariableMarshaller;
109import org.apache.activemq.util.ByteSequence;
110import org.apache.activemq.util.DataByteArrayInputStream;
111import org.apache.activemq.util.DataByteArrayOutputStream;
112import org.apache.activemq.util.IOExceptionSupport;
113import org.apache.activemq.util.IOHelper;
114import org.apache.activemq.util.ServiceStopper;
115import org.apache.activemq.util.ServiceSupport;
116import org.apache.activemq.util.ThreadPoolUtils;
117import org.slf4j.Logger;
118import org.slf4j.LoggerFactory;
119import org.slf4j.MDC;
120
/**
 * Base class of the KahaDB persistence store. Owns the write-ahead journal of
 * store commands, the page-file backed B-tree index of destinations, and the
 * scheduler that periodically checkpoints the index and cleans up journal files.
 */
public abstract class MessageDatabase extends ServiceSupport implements BrokerServiceAware {

    protected BrokerService brokerService;

    // System property (ms threshold) enabling logging of slow store access; 0 disables it.
    public static final String PROPERTY_LOG_SLOW_ACCESS_TIME = "org.apache.activemq.store.kahadb.LOG_SLOW_ACCESS_TIME";
    public static final int LOG_SLOW_ACCESS_TIME = Integer.getInteger(PROPERTY_LOG_SLOW_ACCESS_TIME, 0);
    public static final File DEFAULT_DIRECTORY = new File("KahaDB");
    // Shared empty buffer sentinel.
    protected static final Buffer UNMATCHED;
    static {
        UNMATCHED = new Buffer(new byte[]{});
    }
    private static final Logger LOG = LoggerFactory.getLogger(MessageDatabase.class);

    // Values persisted in Metadata.state to record whether the store was cleanly closed.
    static final int CLOSED_STATE = 1;
    static final int OPEN_STATE = 2;
    static final long NOT_ACKED = -1;

    // Current on-disk metadata format version (see Metadata.read/write upgrade handling).
    static final int VERSION = 6;

    // Data file type marker for journal files produced by ack compaction.
    static final byte COMPACTED_JOURNAL_FILE = DataFile.STANDARD_LOG_FILE + 1;
141
142    protected class Metadata {
143        protected Page<Metadata> page;
144        protected int state;
145        protected BTreeIndex<String, StoredDestination> destinations;
146        protected Location lastUpdate;
147        protected Location firstInProgressTransactionLocation;
148        protected Location producerSequenceIdTrackerLocation = null;
149        protected Location ackMessageFileMapLocation = null;
150        protected transient ActiveMQMessageAuditNoSync producerSequenceIdTracker = new ActiveMQMessageAuditNoSync();
151        protected transient Map<Integer, Set<Integer>> ackMessageFileMap = new HashMap<>();
152        protected int version = VERSION;
153        protected int openwireVersion = OpenWireFormat.DEFAULT_STORE_VERSION;
154
155        public void read(DataInput is) throws IOException {
156            state = is.readInt();
157            destinations = new BTreeIndex<>(pageFile, is.readLong());
158            if (is.readBoolean()) {
159                lastUpdate = LocationMarshaller.INSTANCE.readPayload(is);
160            } else {
161                lastUpdate = null;
162            }
163            if (is.readBoolean()) {
164                firstInProgressTransactionLocation = LocationMarshaller.INSTANCE.readPayload(is);
165            } else {
166                firstInProgressTransactionLocation = null;
167            }
168            try {
169                if (is.readBoolean()) {
170                    producerSequenceIdTrackerLocation = LocationMarshaller.INSTANCE.readPayload(is);
171                } else {
172                    producerSequenceIdTrackerLocation = null;
173                }
174            } catch (EOFException expectedOnUpgrade) {
175            }
176            try {
177                version = is.readInt();
178            } catch (EOFException expectedOnUpgrade) {
179                version = 1;
180            }
181            if (version >= 5 && is.readBoolean()) {
182                ackMessageFileMapLocation = LocationMarshaller.INSTANCE.readPayload(is);
183            } else {
184                ackMessageFileMapLocation = null;
185            }
186            try {
187                openwireVersion = is.readInt();
188            } catch (EOFException expectedOnUpgrade) {
189                openwireVersion = OpenWireFormat.DEFAULT_LEGACY_VERSION;
190            }
191            LOG.info("KahaDB is version " + version);
192        }
193
194        public void write(DataOutput os) throws IOException {
195            os.writeInt(state);
196            os.writeLong(destinations.getPageId());
197
198            if (lastUpdate != null) {
199                os.writeBoolean(true);
200                LocationMarshaller.INSTANCE.writePayload(lastUpdate, os);
201            } else {
202                os.writeBoolean(false);
203            }
204
205            if (firstInProgressTransactionLocation != null) {
206                os.writeBoolean(true);
207                LocationMarshaller.INSTANCE.writePayload(firstInProgressTransactionLocation, os);
208            } else {
209                os.writeBoolean(false);
210            }
211
212            if (producerSequenceIdTrackerLocation != null) {
213                os.writeBoolean(true);
214                LocationMarshaller.INSTANCE.writePayload(producerSequenceIdTrackerLocation, os);
215            } else {
216                os.writeBoolean(false);
217            }
218            os.writeInt(VERSION);
219            if (ackMessageFileMapLocation != null) {
220                os.writeBoolean(true);
221                LocationMarshaller.INSTANCE.writePayload(ackMessageFileMapLocation, os);
222            } else {
223                os.writeBoolean(false);
224            }
225            os.writeInt(this.openwireVersion);
226        }
227    }
228
229    class MetadataMarshaller extends VariableMarshaller<Metadata> {
230        @Override
231        public Metadata readPayload(DataInput dataIn) throws IOException {
232            Metadata rc = createMetadata();
233            rc.read(dataIn);
234            return rc;
235        }
236
237        @Override
238        public void writePayload(Metadata object, DataOutput dataOut) throws IOException {
239            object.write(dataOut);
240        }
241    }
242
    /**
     * What to do with prepared XA transactions recovered from the journal on
     * restart: leave them pending (NEVER) or force an outcome (COMMIT/ROLLBACK).
     */
    public enum PurgeRecoveredXATransactionStrategy {
        NEVER,
        COMMIT,
        ROLLBACK;
    }
248
    // The two persistence components: page-file backed index and write-ahead journal.
    protected PageFile pageFile;
    protected Journal journal;
    protected Metadata metadata = new Metadata();

    protected MetadataMarshaller metadataMarshaller = new MetadataMarshaller();

    // presumably fail fast at startup when another process holds the store lock — confirm against lock handling
    protected boolean failIfDatabaseIsLocked;

    // When true, load() deletes the journal and index, then resets this to false.
    protected boolean deleteAllMessages;
    protected File directory = DEFAULT_DIRECTORY;
    // Optional separate directory for the index files; null means use 'directory'.
    protected File indexDirectory = null;
    // Single-threaded executor driving CheckpointRunner; guarded by schedulerLock.
    protected ScheduledExecutorService scheduler;
    private final Object schedulerLock = new Object();

    protected JournalDiskSyncStrategy journalDiskSyncStrategy = JournalDiskSyncStrategy.ALWAYS;
    protected boolean archiveDataLogs;
    protected File directoryArchive;
    protected AtomicLong journalSize = new AtomicLong(0);
    // Intervals in milliseconds; 0 disables the corresponding periodic task.
    long journalDiskSyncInterval = 1000;
    long checkpointInterval = 5*1000;
    long cleanupInterval = 30*1000;
    int journalMaxFileLength = Journal.DEFAULT_MAX_FILE_LENGTH;
    int journalMaxWriteBatchSize = Journal.DEFAULT_MAX_WRITE_BATCH_SIZE;
    boolean enableIndexWriteAsync = false;
    int setIndexWriteBatchSize = PageFile.DEFAULT_WRITE_BATCH_SIZE;
    private String preallocationScope = Journal.PreallocationScope.ENTIRE_JOURNAL.name();
    private String preallocationStrategy = Journal.PreallocationStrategy.SPARSE_FILE.name();

    // True between open() and close(); guards double-open and the checkpoint runner.
    protected AtomicBoolean opened = new AtomicBoolean();
    private boolean ignoreMissingJournalfiles = false;
    private int indexCacheSize = 10000;
    private boolean checkForCorruptJournalFiles = false;
    protected PurgeRecoveredXATransactionStrategy purgeRecoveredXATransactionStrategy = PurgeRecoveredXATransactionStrategy.NEVER;
    private boolean checksumJournalFiles = true;
    protected boolean forceRecoverIndex = false;
    // On corrupt-index detection in open(): archive the page file instead of deleting it.
    private boolean archiveCorruptedIndex = false;
    private boolean useIndexLFRUEviction = false;
    private float indexLFUEvictionFactor = 0.2f;
    private boolean enableIndexDiskSyncs = true;
    private boolean enableIndexRecoveryFile = true;
    private boolean enableIndexPageCaching = true;
    // Coordinates checkpoints with other store activity; close() takes the write lock.
    ReentrantReadWriteLock checkpointLock = new ReentrantReadWriteLock();

    private boolean enableAckCompaction = true;
    private int compactAcksAfterNoGC = 10;
    private boolean compactAcksIgnoresStoreGrowth = false;
    private int checkPointCyclesWithNoGC;
    private int journalLogOnLastCompactionCheck;
    private boolean enableSubscriptionStatistics = false;

    //only set when using JournalDiskSyncStrategy.PERIODIC
    protected final AtomicReference<Location> lastAsyncJournalUpdate = new AtomicReference<>();
301
    /** Service lifecycle hook: loads (or creates) the journal and index. */
    @Override
    public void doStart() throws Exception {
        load();
    }
306
    /** Service lifecycle hook: flushes state and shuts the store down. */
    @Override
    public void doStop(ServiceStopper stopper) throws Exception {
        unload();
    }
311
312    public void allowIOResumption() {
313        if (pageFile != null) {
314            pageFile.allowIOResumption();
315        }
316        if (journal != null) {
317            journal.allowIOResumption();
318        }
319    }
320
    /**
     * Loads the index page file, initializing page 0 with fresh Metadata on first
     * use, then populates the storedDestinations cache from the destinations index.
     *
     * @throws IOException on page file failure or when a corrupt index is detected
     */
    private void loadPageFile() throws IOException {
        this.indexLock.writeLock().lock();
        try {
            final PageFile pageFile = getPageFile();
            pageFile.load();
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    if (pageFile.getPageCount() == 0) {
                        // First time this is created.. Initialize the metadata
                        Page<Metadata> page = tx.allocate();
                        // Metadata must always live in page 0 so it can be found on reload.
                        assert page.getPageId() == 0;
                        page.set(metadata);
                        metadata.page = page;
                        metadata.state = CLOSED_STATE;
                        metadata.destinations = new BTreeIndex<>(pageFile, tx.allocate().getPageId());

                        tx.store(metadata.page, metadataMarshaller, true);
                    } else {
                        // Existing store: read the metadata back from page 0.
                        Page<Metadata> page = tx.load(0, metadataMarshaller);
                        metadata = page.get();
                        metadata.page = page;
                    }
                    metadata.destinations.setKeyMarshaller(StringMarshaller.INSTANCE);
                    metadata.destinations.setValueMarshaller(new StoredDestinationMarshaller());
                    metadata.destinations.load(tx);
                }
            });
            // Load up all the destinations since we need to scan all the indexes to figure out which journal files can be deleted.
            // Perhaps we should just keep an index of file
            storedDestinations.clear();
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    for (Iterator<Entry<String, StoredDestination>> iterator = metadata.destinations.iterator(tx); iterator.hasNext();) {
                        Entry<String, StoredDestination> entry = iterator.next();
                        StoredDestination sd = loadStoredDestination(tx, entry.getKey(), entry.getValue().subscriptions!=null);
                        storedDestinations.put(entry.getKey(), sd);

                        if (checkForCorruptJournalFiles) {
                            // sanity check the index also
                            // pending messages imply nextMessageId must have advanced past 0
                            if (!entry.getValue().locationIndex.isEmpty(tx)) {
                                if (entry.getValue().orderIndex.nextMessageId <= 0) {
                                    throw new IOException("Detected uninitialized orderIndex nextMessageId with pending messages for " + entry.getKey());
                                }
                            }
                        }
                    }
                }
            });
            pageFile.flush();
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }
376
377    private void startCheckpoint() {
378        if (checkpointInterval == 0 && cleanupInterval == 0) {
379            LOG.info("periodic checkpoint/cleanup disabled, will ocurr on clean shutdown/restart");
380            return;
381        }
382        synchronized (schedulerLock) {
383            if (scheduler == null || scheduler.isShutdown()) {
384                scheduler = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
385
386                    @Override
387                    public Thread newThread(Runnable r) {
388                        Thread schedulerThread = new Thread(r);
389
390                        schedulerThread.setName("ActiveMQ Journal Checkpoint Worker");
391                        schedulerThread.setDaemon(true);
392
393                        return schedulerThread;
394                    }
395                });
396
397                // Short intervals for check-point and cleanups
398                long delay;
399                if (journal.isJournalDiskSyncPeriodic()) {
400                    delay = Math.min(journalDiskSyncInterval > 0 ? journalDiskSyncInterval : checkpointInterval, 500);
401                } else {
402                    delay = Math.min(checkpointInterval > 0 ? checkpointInterval : cleanupInterval, 500);
403                }
404
405                scheduler.scheduleWithFixedDelay(new CheckpointRunner(), 0, delay, TimeUnit.MILLISECONDS);
406            }
407        }
408    }
409
    /**
     * Periodic task that, based on elapsed intervals: forces a journal disk sync
     * (by storing a trace command when PERIODIC sync is enabled), runs a cleanup
     * checkpoint, or runs a plain checkpoint. IO failures are handed to the broker.
     */
    private final class CheckpointRunner implements Runnable {

        private long lastCheckpoint = System.currentTimeMillis();
        private long lastCleanup = System.currentTimeMillis();
        private long lastSync = System.currentTimeMillis();
        // Last journal location a sync was triggered for; avoids redundant syncs.
        private Location lastAsyncUpdate = null;

        @Override
        public void run() {
            try {
                // Decide on cleanup vs full checkpoint here.
                if (opened.get()) {
                    long now = System.currentTimeMillis();
                    if (journal.isJournalDiskSyncPeriodic() &&
                            journalDiskSyncInterval > 0 && (now - lastSync >= journalDiskSyncInterval)) {
                        Location currentUpdate = lastAsyncJournalUpdate.get();
                        // Only force a sync when something was written since the last one.
                        if (currentUpdate != null && !currentUpdate.equals(lastAsyncUpdate)) {
                            lastAsyncUpdate = currentUpdate;
                            if (LOG.isTraceEnabled()) {
                                LOG.trace("Writing trace command to trigger journal sync");
                            }
                            store(new KahaTraceCommand(), true, null, null);
                        }
                        lastSync = now;
                    }
                    // A cleanup pass subsumes a checkpoint, so both timers reset.
                    if (cleanupInterval > 0 && (now - lastCleanup >= cleanupInterval)) {
                        checkpointCleanup(true);
                        lastCleanup = now;
                        lastCheckpoint = now;
                    } else if (checkpointInterval > 0 && (now - lastCheckpoint >= checkpointInterval)) {
                        checkpointCleanup(false);
                        lastCheckpoint = now;
                    }
                }
            } catch (IOException ioe) {
                LOG.error("Checkpoint failed", ioe);
                brokerService.handleIOException(ioe);
            } catch (Throwable e) {
                LOG.error("Checkpoint failed", e);
                brokerService.handleIOException(IOExceptionSupport.create(e));
            }
        }
    }
453
    /**
     * Opens the store: starts the journal and loads the index. If the index fails
     * to load it is discarded (archived when configured) and rebuilt, after which
     * recovery replays the journal and the checkpoint scheduler is started.
     * Idempotent via the 'opened' flag.
     *
     * @throws IOException if the journal cannot start or recovery fails
     */
    public void open() throws IOException {
        if( opened.compareAndSet(false, true) ) {
            getJournal().start();
            try {
                loadPageFile();
            } catch (Throwable t) {
                LOG.warn("Index corrupted. Recovering the index through journal replay. Cause:" + t);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Index load failure", t);
                }
                // try to recover index
                try {
                    pageFile.unload();
                } catch (Exception ignore) {}
                if (archiveCorruptedIndex) {
                    pageFile.archive();
                } else {
                    pageFile.delete();
                }
                metadata = createMetadata();
                //The metadata was recreated after a detect corruption so we need to
                //reconfigure anything that was configured on the old metadata on startup
                configureMetadata();
                pageFile = null;
                loadPageFile();
            }
            recover();
            startCheckpoint();
        }
    }
484
    /**
     * Loads the store under the index write lock: optionally purges everything
     * first (deleteAllMessages), opens the store, and journals a LOADED marker.
     *
     * @throws IOException on any journal or index failure
     */
    public void load() throws IOException {
        this.indexLock.writeLock().lock();
        try {
            IOHelper.mkdirs(directory);
            if (deleteAllMessages) {
                // Skip corruption checks; everything is about to be deleted anyway.
                getJournal().setCheckForCorruptionOnStartup(false);
                getJournal().start();
                getJournal().delete();
                getJournal().close();
                journal = null;
                getPageFile().delete();
                LOG.info("Persistence store purged.");
                deleteAllMessages = false;
            }

            open();
            store(new KahaTraceCommand().setMessage("LOADED " + new Date()));
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }
506
    /**
     * Closes the store: runs a final checkpoint under the checkpoint write lock,
     * unloads the page file, closes the journal, stops the scheduler, and resets
     * cached state. Idempotent via the 'opened' flag.
     *
     * @throws IOException on checkpoint, page file, or journal close failure
     * @throws InterruptedException if interrupted while stopping the scheduler
     */
    public void close() throws IOException, InterruptedException {
        if (opened.compareAndSet(true, false)) {
            checkpointLock.writeLock().lock();
            try {
                // Only checkpoint when the metadata page was actually loaded.
                if (metadata.page != null) {
                    checkpointUpdate(true);
                }
                pageFile.unload();
                metadata = createMetadata();
            } finally {
                checkpointLock.writeLock().unlock();
            }
            journal.close();
            synchronized(schedulerLock) {
                if (scheduler != null) {
                    ThreadPoolUtils.shutdownGraceful(scheduler, -1);
                    scheduler = null;
                }
            }
            // clear the cache and journalSize on shutdown of the store
            storeCache.clear();
            journalSize.set(0);
        }
    }
531
    /**
     * Marks the store cleanly closed — records CLOSED_STATE and the earliest
     * in-progress transaction location in the metadata page — then closes it.
     *
     * @throws IOException on index or journal failure
     * @throws InterruptedException if interrupted during close
     */
    public void unload() throws IOException, InterruptedException {
        this.indexLock.writeLock().lock();
        try {
            if( pageFile != null && pageFile.isLoaded() ) {
                metadata.state = CLOSED_STATE;
                metadata.firstInProgressTransactionLocation = getInProgressTxLocationRange()[0];

                if (metadata.page != null) {
                    pageFile.tx().execute(new Transaction.Closure<IOException>() {
                        @Override
                        public void execute(Transaction tx) throws IOException {
                            tx.store(metadata.page, metadataMarshaller, true);
                        }
                    });
                }
            }
        } finally {
            this.indexLock.writeLock().unlock();
        }
        close();
    }
553
554    // public for testing
555    @SuppressWarnings("rawtypes")
556    public Location[] getInProgressTxLocationRange() {
557        Location[] range = new Location[]{null, null};
558        synchronized (inflightTransactions) {
559            if (!inflightTransactions.isEmpty()) {
560                for (List<Operation> ops : inflightTransactions.values()) {
561                    if (!ops.isEmpty()) {
562                        trackMaxAndMin(range, ops);
563                    }
564                }
565            }
566            if (!preparedTransactions.isEmpty()) {
567                for (List<Operation> ops : preparedTransactions.values()) {
568                    if (!ops.isEmpty()) {
569                        trackMaxAndMin(range, ops);
570                    }
571                }
572            }
573        }
574        return range;
575    }
576
577    @SuppressWarnings("rawtypes")
578    private void trackMaxAndMin(Location[] range, List<Operation> ops) {
579        Location t = ops.get(0).getLocation();
580        if (range[0] == null || t.compareTo(range[0]) <= 0) {
581            range[0] = t;
582        }
583        t = ops.get(ops.size() -1).getLocation();
584        if (range[1] == null || t.compareTo(range[1]) >= 0) {
585            range[1] = t;
586        }
587    }
588
589    class TranInfo {
590        TransactionId id;
591        Location location;
592
593        class opCount {
594            int add;
595            int remove;
596        }
597        HashMap<KahaDestination, opCount> destinationOpCount = new HashMap<>();
598
599        @SuppressWarnings("rawtypes")
600        public void track(Operation operation) {
601            if (location == null ) {
602                location = operation.getLocation();
603            }
604            KahaDestination destination;
605            boolean isAdd = false;
606            if (operation instanceof AddOperation) {
607                AddOperation add = (AddOperation) operation;
608                destination = add.getCommand().getDestination();
609                isAdd = true;
610            } else {
611                RemoveOperation removeOpperation = (RemoveOperation) operation;
612                destination = removeOpperation.getCommand().getDestination();
613            }
614            opCount opCount = destinationOpCount.get(destination);
615            if (opCount == null) {
616                opCount = new opCount();
617                destinationOpCount.put(destination, opCount);
618            }
619            if (isAdd) {
620                opCount.add++;
621            } else {
622                opCount.remove++;
623            }
624        }
625
626        @Override
627        public String toString() {
628           StringBuffer buffer = new StringBuffer();
629           buffer.append(location).append(";").append(id).append(";\n");
630           for (Entry<KahaDestination, opCount> op : destinationOpCount.entrySet()) {
631               buffer.append(op.getKey()).append('+').append(op.getValue().add).append(',').append('-').append(op.getValue().remove).append(';');
632           }
633           return buffer.toString();
634        }
635    }
636
637    @SuppressWarnings("rawtypes")
638    public String getTransactions() {
639
640        ArrayList<TranInfo> infos = new ArrayList<>();
641        synchronized (inflightTransactions) {
642            if (!inflightTransactions.isEmpty()) {
643                for (Entry<TransactionId, List<Operation>> entry : inflightTransactions.entrySet()) {
644                    TranInfo info = new TranInfo();
645                    info.id = entry.getKey();
646                    for (Operation operation : entry.getValue()) {
647                        info.track(operation);
648                    }
649                    infos.add(info);
650                }
651            }
652        }
653        synchronized (preparedTransactions) {
654            if (!preparedTransactions.isEmpty()) {
655                for (Entry<TransactionId, List<Operation>> entry : preparedTransactions.entrySet()) {
656                    TranInfo info = new TranInfo();
657                    info.id = entry.getKey();
658                    for (Operation operation : entry.getValue()) {
659                        info.track(operation);
660                    }
661                    infos.add(info);
662                }
663            }
664        }
665        return infos.toString();
666    }
667
668    /**
669     * Move all the messages that were in the journal into long term storage. We
670     * just replay and do a checkpoint.
671     *
672     * @throws IOException
673     * @throws IOException
674     * @throws IllegalStateException
675     */
676    private void recover() throws IllegalStateException, IOException {
677        this.indexLock.writeLock().lock();
678        try {
679
680            long start = System.currentTimeMillis();
681            boolean requiresJournalReplay = recoverProducerAudit();
682            requiresJournalReplay |= recoverAckMessageFileMap();
683            Location lastIndoubtPosition = getRecoveryPosition();
684            Location recoveryPosition = requiresJournalReplay ? journal.getNextLocation(null) : lastIndoubtPosition;
685            if (recoveryPosition != null) {
686                int redoCounter = 0;
687                int dataFileRotationTracker = recoveryPosition.getDataFileId();
688                LOG.info("Recovering from the journal @" + recoveryPosition);
689                while (recoveryPosition != null) {
690                    try {
691                        JournalCommand<?> message = load(recoveryPosition);
692                        metadata.lastUpdate = recoveryPosition;
693                        process(message, recoveryPosition, lastIndoubtPosition);
694                        redoCounter++;
695                    } catch (IOException failedRecovery) {
696                        if (isIgnoreMissingJournalfiles()) {
697                            LOG.debug("Failed to recover data at position:" + recoveryPosition, failedRecovery);
698                            // track this dud location
699                            journal.corruptRecoveryLocation(recoveryPosition);
700                        } else {
701                            throw new IOException("Failed to recover data at position:" + recoveryPosition, failedRecovery);
702                        }
703                    }
704                    recoveryPosition = journal.getNextLocation(recoveryPosition);
705                    // hold on to the minimum number of open files during recovery
706                    if (recoveryPosition != null && dataFileRotationTracker != recoveryPosition.getDataFileId()) {
707                        dataFileRotationTracker = recoveryPosition.getDataFileId();
708                        journal.cleanup();
709                    }
710                    if (LOG.isInfoEnabled() && redoCounter % 100000 == 0) {
711                        LOG.info("@" + recoveryPosition + ", " + redoCounter + " entries recovered ..");
712                    }
713                }
714                if (LOG.isInfoEnabled()) {
715                    long end = System.currentTimeMillis();
716                    LOG.info("Recovery replayed " + redoCounter + " operations from the journal in " + ((end - start) / 1000.0f) + " seconds.");
717                }
718            }
719
720            // We may have to undo some index updates.
721            pageFile.tx().execute(new Transaction.Closure<IOException>() {
722                @Override
723                public void execute(Transaction tx) throws IOException {
724                    recoverIndex(tx);
725                }
726            });
727
728            // rollback any recovered inflight local transactions, and discard any inflight XA transactions.
729            Set<TransactionId> toRollback = new HashSet<>();
730            Set<TransactionId> toDiscard = new HashSet<>();
731            synchronized (inflightTransactions) {
732                for (Iterator<TransactionId> it = inflightTransactions.keySet().iterator(); it.hasNext(); ) {
733                    TransactionId id = it.next();
734                    if (id.isLocalTransaction()) {
735                        toRollback.add(id);
736                    } else {
737                        toDiscard.add(id);
738                    }
739                }
740                for (TransactionId tx: toRollback) {
741                    if (LOG.isDebugEnabled()) {
742                        LOG.debug("rolling back recovered indoubt local transaction " + tx);
743                    }
744                    store(new KahaRollbackCommand().setTransactionInfo(TransactionIdConversion.convertToLocal(tx)), false, null, null);
745                }
746                for (TransactionId tx: toDiscard) {
747                    if (LOG.isDebugEnabled()) {
748                        LOG.debug("discarding recovered in-flight XA transaction " + tx);
749                    }
750                    inflightTransactions.remove(tx);
751                }
752            }
753
754            synchronized (preparedTransactions) {
755                Set<TransactionId> txIds = new LinkedHashSet<TransactionId>(preparedTransactions.keySet());
756                for (TransactionId txId : txIds) {
757                    switch (purgeRecoveredXATransactionStrategy){
758                        case NEVER:
759                            LOG.warn("Recovered prepared XA TX: [{}]", txId);
760                            break;
761                        case COMMIT:
762                            store(new KahaCommitCommand().setTransactionInfo(TransactionIdConversion.convert(txId)), false, null, null);
763                            LOG.warn("Recovered and Committing prepared XA TX: [{}]", txId);
764                            break;
765                        case ROLLBACK:
766                            store(new KahaRollbackCommand().setTransactionInfo(TransactionIdConversion.convert(txId)), false, null, null);
767                            LOG.warn("Recovered and Rolling Back prepared XA TX: [{}]", txId);
768                            break;
769                    }
770                }
771            }
772
773        } finally {
774            this.indexLock.writeLock().unlock();
775        }
776    }
777
778    @SuppressWarnings("unused")
779    private KahaTransactionInfo createLocalTransactionInfo(TransactionId tx) {
780        return TransactionIdConversion.convertToLocal(tx);
781    }
782
783    private Location minimum(Location x,
784                             Location y) {
785        Location min = null;
786        if (x != null) {
787            min = x;
788            if (y != null) {
789                int compare = y.compareTo(x);
790                if (compare < 0) {
791                    min = y;
792                }
793            }
794        } else {
795            min = y;
796        }
797        return min;
798    }
799
800    private boolean recoverProducerAudit() throws IOException {
801        boolean requiresReplay = true;
802        if (metadata.producerSequenceIdTrackerLocation != null) {
803            try {
804                KahaProducerAuditCommand audit = (KahaProducerAuditCommand) load(metadata.producerSequenceIdTrackerLocation);
805                ObjectInputStream objectIn = new ObjectInputStream(audit.getAudit().newInput());
806                int maxNumProducers = getMaxFailoverProducersToTrack();
807                int maxAuditDepth = getFailoverProducersAuditDepth();
808                metadata.producerSequenceIdTracker = (ActiveMQMessageAuditNoSync) objectIn.readObject();
809                metadata.producerSequenceIdTracker.setAuditDepth(maxAuditDepth);
810                metadata.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(maxNumProducers);
811                requiresReplay = false;
812            } catch (Exception e) {
813                LOG.warn("Cannot recover message audit", e);
814            }
815        }
816        // got no audit stored so got to recreate via replay from start of the journal
817        return requiresReplay;
818    }
819
820    @SuppressWarnings("unchecked")
821    private boolean recoverAckMessageFileMap() throws IOException {
822        boolean requiresReplay = true;
823        if (metadata.ackMessageFileMapLocation != null) {
824            try {
825                KahaAckMessageFileMapCommand audit = (KahaAckMessageFileMapCommand) load(metadata.ackMessageFileMapLocation);
826                ObjectInputStream objectIn = new ObjectInputStream(audit.getAckMessageFileMap().newInput());
827                metadata.ackMessageFileMap = (Map<Integer, Set<Integer>>) objectIn.readObject();
828                requiresReplay = false;
829            } catch (Exception e) {
830                LOG.warn("Cannot recover ackMessageFileMap", e);
831            }
832        }
833        // got no ackMessageFileMap stored so got to recreate via replay from start of the journal
834        return requiresReplay;
835    }
836
    /**
     * Reconciles the index with the journal after a restart:
     * <ol>
     *   <li>removes index entries whose journal location is at or past the last
     *       append location (index updates that outran the journal write),</li>
     *   <li>collects the set of data files referenced by any destination index
     *       or by the ackMessageFileMap and cross-checks it against the files
     *       actually on disk (plus any corrupted regions, when
     *       checkForCorruptJournalFiles is enabled), and</li>
     *   <li>either drops the messages referencing missing/corrupt data
     *       (when ignoreMissingJournalfiles is set) or aborts recovery.</li>
     * </ol>
     *
     * @param tx the page file transaction all index updates are performed in
     * @throws IOException if missing or corrupt journal files are referenced
     *         and ignoreMissingJournalfiles is false
     */
    protected void recoverIndex(Transaction tx) throws IOException {
        long start = System.currentTimeMillis();
        // It is possible index updates got applied before the journal updates..
        // in that case we need to remove references to messages that are not in the journal
        final Location lastAppendLocation = journal.getLastAppendLocation();
        long undoCounter=0;

        // Go through all the destinations to see if they have messages past the lastAppendLocation
        for (String key : storedDestinations.keySet()) {
            StoredDestination sd = storedDestinations.get(key);

            final ArrayList<Long> matches = new ArrayList<>();
            // Find all the Locations that are >= than the last Append Location.
            sd.locationIndex.visit(tx, new BTreeVisitor.GTEVisitor<Location, Long>(lastAppendLocation) {
                @Override
                protected void matched(Location key, Long value) {
                    matches.add(value);
                }
            });

            // Remove every matched sequence from all three indexes so they stay consistent.
            for (Long sequenceId : matches) {
                MessageKeys keys = sd.orderIndex.remove(tx, sequenceId);
                if (keys != null) {
                    sd.locationIndex.remove(tx, keys.location);
                    sd.messageIdIndex.remove(tx, keys.messageId);
                    metadata.producerSequenceIdTracker.rollback(keys.messageId);
                    undoCounter++;
                    decrementAndSubSizeToStoreStat(key, keys.location.getSize());
                    // TODO: do we need to modify the ack positions for the pub sub case?
                }
            }
        }

        if (undoCounter > 0) {
            // The rolledback operations are basically in flight journal writes.  To avoid getting
            // these the end user should do sync writes to the journal.
            if (LOG.isInfoEnabled()) {
                long end = System.currentTimeMillis();
                LOG.info("Rolled back " + undoCounter + " messages from the index in " + ((end - start) / 1000.0f) + " seconds.");
            }
        }

        undoCounter = 0;
        start = System.currentTimeMillis();

        // Lets be extra paranoid here and verify that all the datafiles being referenced
        // by the indexes still exists.

        // ss accumulates the ids of every data file referenced by any location index.
        final SequenceSet ss = new SequenceSet();
        for (StoredDestination sd : storedDestinations.values()) {
            // Use a visitor to cut down the number of pages that we load
            sd.locationIndex.visit(tx, new BTreeVisitor<Location, Long>() {
                int last=-1;

                @Override
                public boolean isInterestedInKeysBetween(Location first, Location second) {
                    // skip key ranges whose file ids are already fully recorded in ss
                    if( first==null ) {
                        return !ss.contains(0, second.getDataFileId());
                    } else if( second==null ) {
                        return true;
                    } else {
                        return !ss.contains(first.getDataFileId(), second.getDataFileId());
                    }
                }

                @Override
                public void visit(List<Location> keys, List<Long> values) {
                    for (Location l : keys) {
                        int fileId = l.getDataFileId();
                        if( last != fileId ) {
                            ss.add(fileId);
                            last = fileId;
                        }
                    }
                }

            });
        }
        // despite the name, at this point this is the set of *referenced* files;
        // it becomes "missing" only after the removeAll against the journal below
        HashSet<Integer> missingJournalFiles = new HashSet<>();
        while (!ss.isEmpty()) {
            missingJournalFiles.add((int) ss.removeFirst());
        }

        // files referenced by the ack map are needed too (acks live apart from messages)
        for (Entry<Integer, Set<Integer>> entry : metadata.ackMessageFileMap.entrySet()) {
            missingJournalFiles.add(entry.getKey());
            for (Integer i : entry.getValue()) {
                missingJournalFiles.add(i);
            }
        }

        missingJournalFiles.removeAll(journal.getFileMap().keySet());

        if (!missingJournalFiles.isEmpty()) {
            LOG.warn("Some journal files are missing: " + missingJournalFiles);
        }

        ArrayList<BTreeVisitor.Predicate<Location>> knownCorruption = new ArrayList<>();
        ArrayList<BTreeVisitor.Predicate<Location>> missingPredicates = new ArrayList<>();
        // one predicate per missing file, spanning its whole location range
        for (Integer missing : missingJournalFiles) {
            missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(missing, 0), new Location(missing + 1, 0)));
        }

        if (checkForCorruptJournalFiles) {
            Collection<DataFile> dataFiles = journal.getFileMap().values();
            for (DataFile dataFile : dataFiles) {
                int id = dataFile.getDataFileId();
                // eof to next file id
                missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(id, dataFile.getLength()), new Location(id + 1, 0)));
                // each corrupted byte range inside an existing file is also treated as missing
                Sequence seq = dataFile.getCorruptedBlocks().getHead();
                while (seq != null) {
                    BTreeVisitor.BetweenVisitor<Location, Long> visitor =
                        new BTreeVisitor.BetweenVisitor<>(new Location(id, (int) seq.getFirst()), new Location(id, (int) seq.getLast() + 1));
                    missingPredicates.add(visitor);
                    knownCorruption.add(visitor);
                    seq = seq.getNext();
                }
            }
        }

        if (!missingPredicates.isEmpty()) {
            for (Entry<String, StoredDestination> sdEntry : storedDestinations.entrySet()) {
                final StoredDestination sd = sdEntry.getValue();
                final LinkedHashMap<Long, Location> matches = new LinkedHashMap<>();
                sd.locationIndex.visit(tx, new BTreeVisitor.OrVisitor<Location, Long>(missingPredicates) {
                    @Override
                    protected void matched(Location key, Long value) {
                        matches.put(value, key);
                    }
                });

                // If some message references are affected by the missing data files...
                if (!matches.isEmpty()) {

                    // We either 'gracefully' recover dropping the missing messages or
                    // we error out.
                    if( ignoreMissingJournalfiles ) {
                        // Update the index to remove the references to the missing data
                        for (Long sequenceId : matches.keySet()) {
                            MessageKeys keys = sd.orderIndex.remove(tx, sequenceId);
                            sd.locationIndex.remove(tx, keys.location);
                            sd.messageIdIndex.remove(tx, keys.messageId);
                            LOG.info("[" + sdEntry.getKey() + "] dropped: " + keys.messageId + " at corrupt location: " + keys.location);
                            undoCounter++;
                            decrementAndSubSizeToStoreStat(sdEntry.getKey(), keys.location.getSize());
                            // TODO: do we need to modify the ack positions for the pub sub case?
                        }
                    } else {
                        LOG.error("[" + sdEntry.getKey() + "] references corrupt locations: " + matches);
                        throw new IOException("Detected missing/corrupt journal files referenced by:[" + sdEntry.getKey() + "] " +matches.size()+" messages affected.");
                    }
                }
            }
        }

        if (!ignoreMissingJournalfiles) {
            if (!knownCorruption.isEmpty()) {
                LOG.error("Detected corrupt journal files. " + knownCorruption);
                throw new IOException("Detected corrupt journal files. " + knownCorruption);
            }

            if (!missingJournalFiles.isEmpty()) {
                LOG.error("Detected missing journal files. " + missingJournalFiles);
                throw new IOException("Detected missing journal files. " + missingJournalFiles);
            }
        }

        if (undoCounter > 0) {
            // The rolledback operations are basically in flight journal writes.  To avoid getting these the end user
            // should do sync writes to the journal.
            if (LOG.isInfoEnabled()) {
                long end = System.currentTimeMillis();
                LOG.info("Detected missing/corrupt journal files.  Dropped " + undoCounter + " messages from the index in " + ((end - start) / 1000.0f) + " seconds.");
            }
        }
    }
1012
    // Cursor state for incrementalRecover(): the next journal position to replay
    // and the last one already replayed. Accessed under indexLock's write lock.
    private Location nextRecoveryPosition;
    private Location lastRecoveryPosition;
1015
1016    public void incrementalRecover() throws IOException {
1017        this.indexLock.writeLock().lock();
1018        try {
1019            if( nextRecoveryPosition == null ) {
1020                if( lastRecoveryPosition==null ) {
1021                    nextRecoveryPosition = getRecoveryPosition();
1022                } else {
1023                    nextRecoveryPosition = journal.getNextLocation(lastRecoveryPosition);
1024                }
1025            }
1026            while (nextRecoveryPosition != null) {
1027                lastRecoveryPosition = nextRecoveryPosition;
1028                metadata.lastUpdate = lastRecoveryPosition;
1029                JournalCommand<?> message = load(lastRecoveryPosition);
1030                process(message, lastRecoveryPosition, (IndexAware) null);
1031                nextRecoveryPosition = journal.getNextLocation(lastRecoveryPosition);
1032            }
1033        } finally {
1034            this.indexLock.writeLock().unlock();
1035        }
1036    }
1037
    /**
     * Returns the journal location of the last update applied to the index,
     * which may be null when nothing has been recorded yet.
     *
     * @throws IOException declared for API compatibility; not thrown here.
     */
    public Location getLastUpdatePosition() throws IOException {
        return metadata.lastUpdate;
    }
1041
1042    private Location getRecoveryPosition() throws IOException {
1043
1044        if (!this.forceRecoverIndex) {
1045
1046            // If we need to recover the transactions..
1047            if (metadata.firstInProgressTransactionLocation != null) {
1048                return metadata.firstInProgressTransactionLocation;
1049            }
1050
1051            // Perhaps there were no transactions...
1052            if( metadata.lastUpdate!=null) {
1053                // Start replay at the record after the last one recorded in the index file.
1054                return getNextInitializedLocation(metadata.lastUpdate);
1055            }
1056        }
1057        // This loads the first position.
1058        return journal.getNextLocation(null);
1059    }
1060
1061    private Location getNextInitializedLocation(Location location) throws IOException {
1062        Location mayNotBeInitialized = journal.getNextLocation(location);
1063        if (location.getSize() == NOT_SET && mayNotBeInitialized != null && mayNotBeInitialized.getSize() != NOT_SET) {
1064            // need to init size and type to skip
1065            return journal.getNextLocation(mayNotBeInitialized);
1066        } else {
1067            return mayNotBeInitialized;
1068        }
1069    }
1070
1071    protected void checkpointCleanup(final boolean cleanup) throws IOException {
1072        long start;
1073        this.indexLock.writeLock().lock();
1074        try {
1075            start = System.currentTimeMillis();
1076            if( !opened.get() ) {
1077                return;
1078            }
1079        } finally {
1080            this.indexLock.writeLock().unlock();
1081        }
1082        checkpointUpdate(cleanup);
1083        long end = System.currentTimeMillis();
1084        if (LOG_SLOW_ACCESS_TIME > 0 && end - start > LOG_SLOW_ACCESS_TIME) {
1085            if (LOG.isInfoEnabled()) {
1086                LOG.info("Slow KahaDB access: cleanup took " + (end - start));
1087            }
1088        }
1089    }
1090
1091    public ByteSequence toByteSequence(JournalCommand<?> data) throws IOException {
1092        int size = data.serializedSizeFramed();
1093        DataByteArrayOutputStream os = new DataByteArrayOutputStream(size + 1);
1094        os.writeByte(data.type().getNumber());
1095        data.writeFramed(os);
1096        return os.toByteSequence();
1097    }
1098
1099    // /////////////////////////////////////////////////////////////////
1100    // Methods call by the broker to update and query the store.
1101    // /////////////////////////////////////////////////////////////////
    /**
     * Stores the command asynchronously (no disk sync) with no callbacks.
     * Delegates to the full store variant.
     */
    public Location store(JournalCommand<?> data) throws IOException {
        return store(data, false, null,null);
    }
1105
    /**
     * Stores the command asynchronously, running the given callback once the
     * data has been written to the journal. Delegates to the full store variant.
     */
    public Location store(JournalCommand<?> data, Runnable onJournalStoreComplete) throws IOException {
        return store(data, false, null, null, onJournalStoreComplete);
    }
1109
    /**
     * Stores the command with optional disk sync and before/after callbacks,
     * without a journal-completion callback. Delegates to the full store variant.
     */
    public Location store(JournalCommand<?> data, boolean sync, IndexAware before,Runnable after) throws IOException {
        return store(data, sync, before, after, null);
    }
1113
1114    /**
1115     * All updated are are funneled through this method. The updates are converted
1116     * to a JournalMessage which is logged to the journal and then the data from
1117     * the JournalMessage is used to update the index just like it would be done
1118     * during a recovery process.
1119     */
1120    public Location store(JournalCommand<?> data, boolean sync, IndexAware before, Runnable after, Runnable onJournalStoreComplete) throws IOException {
1121        try {
1122            ByteSequence sequence = toByteSequence(data);
1123            Location location;
1124
1125            checkpointLock.readLock().lock();
1126            try {
1127
1128                long start = System.currentTimeMillis();
1129                location = onJournalStoreComplete == null ? journal.write(sequence, sync) : journal.write(sequence, onJournalStoreComplete) ;
1130                long start2 = System.currentTimeMillis();
1131                //Track the last async update so we know if we need to sync at the next checkpoint
1132                if (!sync && journal.isJournalDiskSyncPeriodic()) {
1133                    lastAsyncJournalUpdate.set(location);
1134                }
1135                process(data, location, before);
1136
1137                long end = System.currentTimeMillis();
1138                if (LOG_SLOW_ACCESS_TIME > 0 && end - start > LOG_SLOW_ACCESS_TIME) {
1139                    if (LOG.isInfoEnabled()) {
1140                        LOG.info("Slow KahaDB access: Journal append took: "+(start2-start)+" ms, Index update took "+(end-start2)+" ms");
1141                    }
1142                }
1143            } finally {
1144                checkpointLock.readLock().unlock();
1145            }
1146
1147            if (after != null) {
1148                after.run();
1149            }
1150
1151            if (scheduler == null && opened.get()) {
1152                startCheckpoint();
1153            }
1154            return location;
1155        } catch (IOException ioe) {
1156            LOG.error("KahaDB failed to store to Journal, command of type: " + data.type(), ioe);
1157            brokerService.handleIOException(ioe);
1158            throw ioe;
1159        }
1160    }
1161
1162    /**
1163     * Loads a previously stored JournalMessage
1164     *
1165     * @param location
1166     * @return
1167     * @throws IOException
1168     */
1169    public JournalCommand<?> load(Location location) throws IOException {
1170        long start = System.currentTimeMillis();
1171        ByteSequence data = journal.read(location);
1172        long end = System.currentTimeMillis();
1173        if( LOG_SLOW_ACCESS_TIME>0 && end-start > LOG_SLOW_ACCESS_TIME) {
1174            if (LOG.isInfoEnabled()) {
1175                LOG.info("Slow KahaDB access: Journal read took: "+(end-start)+" ms");
1176            }
1177        }
1178        DataByteArrayInputStream is = new DataByteArrayInputStream(data);
1179        byte readByte = is.readByte();
1180        KahaEntryType type = KahaEntryType.valueOf(readByte);
1181        if( type == null ) {
1182            try {
1183                is.close();
1184            } catch (IOException e) {}
1185            throw new IOException("Could not load journal record, null type information from: " + readByte + " at location: "+location);
1186        }
1187        JournalCommand<?> message = (JournalCommand<?>)type.createMessage();
1188        message.mergeFramed(is);
1189        return message;
1190    }
1191
1192    /**
1193     * do minimal recovery till we reach the last inDoubtLocation
1194     * @param data
1195     * @param location
1196     * @param inDoubtlocation
1197     * @throws IOException
1198     */
1199    void process(JournalCommand<?> data, final Location location, final Location inDoubtlocation) throws IOException {
1200        if (inDoubtlocation != null && location.compareTo(inDoubtlocation) >= 0) {
1201            process(data, location, (IndexAware) null);
1202        } else {
1203            // just recover producer audit
1204            data.visit(new Visitor() {
1205                @Override
1206                public void visit(KahaAddMessageCommand command) throws IOException {
1207                    metadata.producerSequenceIdTracker.isDuplicate(command.getMessageId());
1208                }
1209            });
1210        }
1211    }
1212
1213    // /////////////////////////////////////////////////////////////////
1214    // Journaled record processing methods. Once the record is journaled,
1215    // these methods handle applying the index updates. These may be called
1216    // from the recovery method too so they need to be idempotent
1217    // /////////////////////////////////////////////////////////////////
1218
    /**
     * Dispatches a journaled command to its type-specific handler via the
     * visitor pattern. Add and commit commands also receive the sequence
     * callback; audit/ack-map/trace commands only advance the last-update
     * location.
     *
     * @param data the command to apply
     * @param location the journal location the command was written at
     * @param onSequenceAssignedCallback invoked (index lock held) once an index
     *        sequence is assigned; may be null
     */
    void process(JournalCommand<?> data, final Location location, final IndexAware onSequenceAssignedCallback) throws IOException {
        data.visit(new Visitor() {
            @Override
            public void visit(KahaAddMessageCommand command) throws IOException {
                process(command, location, onSequenceAssignedCallback);
            }

            @Override
            public void visit(KahaRemoveMessageCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaPrepareCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaCommitCommand command) throws IOException {
                process(command, location, onSequenceAssignedCallback);
            }

            @Override
            public void visit(KahaRollbackCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaRemoveDestinationCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaSubscriptionCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaProducerAuditCommand command) throws IOException {
                processLocation(location);
            }

            @Override
            public void visit(KahaAckMessageFileMapCommand command) throws IOException {
                processLocation(location);
            }

            @Override
            public void visit(KahaTraceCommand command) {
                processLocation(location);
            }

            @Override
            public void visit(KahaUpdateMessageCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaRewrittenDataFileCommand command) throws IOException {
                process(command, location);
            }
        });
    }
1282
1283    @SuppressWarnings("rawtypes")
1284    protected void process(final KahaAddMessageCommand command, final Location location, final IndexAware runWithIndexLock) throws IOException {
1285        if (command.hasTransactionInfo()) {
1286            List<Operation> inflightTx = getInflightTx(command.getTransactionInfo());
1287            inflightTx.add(new AddOperation(command, location, runWithIndexLock));
1288        } else {
1289            this.indexLock.writeLock().lock();
1290            try {
1291                pageFile.tx().execute(new Transaction.Closure<IOException>() {
1292                    @Override
1293                    public void execute(Transaction tx) throws IOException {
1294                        long assignedIndex = updateIndex(tx, command, location);
1295                        if (runWithIndexLock != null) {
1296                            runWithIndexLock.sequenceAssignedWithIndexLocked(assignedIndex);
1297                        }
1298                    }
1299                });
1300
1301            } finally {
1302                this.indexLock.writeLock().unlock();
1303            }
1304        }
1305    }
1306
    /**
     * Applies an update-message command to the index immediately, under the
     * index write lock and inside a page-file transaction.
     */
    protected void process(final KahaUpdateMessageCommand command, final Location location) throws IOException {
        this.indexLock.writeLock().lock();
        try {
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    updateIndex(tx, command, location);
                }
            });
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }
1320
1321    @SuppressWarnings("rawtypes")
1322    protected void process(final KahaRemoveMessageCommand command, final Location location) throws IOException {
1323        if (command.hasTransactionInfo()) {
1324           List<Operation> inflightTx = getInflightTx(command.getTransactionInfo());
1325           inflightTx.add(new RemoveOperation(command, location));
1326        } else {
1327            this.indexLock.writeLock().lock();
1328            try {
1329                pageFile.tx().execute(new Transaction.Closure<IOException>() {
1330                    @Override
1331                    public void execute(Transaction tx) throws IOException {
1332                        updateIndex(tx, command, location);
1333                    }
1334                });
1335            } finally {
1336                this.indexLock.writeLock().unlock();
1337            }
1338        }
1339    }
1340
    /**
     * Applies a remove-destination command to the index immediately, under the
     * index write lock and inside a page-file transaction.
     */
    protected void process(final KahaRemoveDestinationCommand command, final Location location) throws IOException {
        this.indexLock.writeLock().lock();
        try {
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    updateIndex(tx, command, location);
                }
            });
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }
1354
    /**
     * Applies a subscription (add/remove durable sub) command to the index
     * immediately, under the index write lock and inside a page-file transaction.
     */
    protected void process(final KahaSubscriptionCommand command, final Location location) throws IOException {
        this.indexLock.writeLock().lock();
        try {
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    updateIndex(tx, command, location);
                }
            });
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }
1368
    /**
     * Records the given journal location as the last applied update without
     * touching the index — used for commands (audit, ack-map, trace) that carry
     * no index state of their own.
     */
    protected void processLocation(final Location location) {
        this.indexLock.writeLock().lock();
        try {
            metadata.lastUpdate = location;
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }
1377
    /**
     * Applies a commit command: the transaction's deferred operations are
     * removed from the in-flight (or prepared) set and executed against the
     * index inside a single page-file transaction.
     *
     * @param command the commit command carrying the transaction info
     * @param location journal location of the commit record
     * @param before notified with sequence -1 when the transaction had no
     *        persistent operations; may be null
     */
    @SuppressWarnings("rawtypes")
    protected void process(KahaCommitCommand command, final Location location, final IndexAware before) throws IOException {
        TransactionId key = TransactionIdConversion.convert(command.getTransactionInfo());
        List<Operation> inflightTx;
        synchronized (inflightTransactions) {
            // a committed XA transaction may have been moved to the prepared set
            inflightTx = inflightTransactions.remove(key);
            if (inflightTx == null) {
                inflightTx = preparedTransactions.remove(key);
            }
        }
        if (inflightTx == null) {
            // only non persistent messages in this tx
            if (before != null) {
                before.sequenceAssignedWithIndexLocked(-1);
            }
            return;
        }

        final List<Operation> messagingTx = inflightTx;
        indexLock.writeLock().lock();
        try {
            // replay every deferred add/remove atomically within one index tx
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    for (Operation op : messagingTx) {
                        op.execute(tx);
                    }
                }
            });
            metadata.lastUpdate = location;
        } finally {
            indexLock.writeLock().unlock();
        }
    }
1412
1413    @SuppressWarnings("rawtypes")
1414    protected void process(KahaPrepareCommand command, Location location) {
1415        TransactionId key = TransactionIdConversion.convert(command.getTransactionInfo());
1416        synchronized (inflightTransactions) {
1417            List<Operation> tx = inflightTransactions.remove(key);
1418            if (tx != null) {
1419                preparedTransactions.put(key, tx);
1420            }
1421        }
1422    }
1423
1424    @SuppressWarnings("rawtypes")
1425    protected void process(KahaRollbackCommand command, Location location)  throws IOException {
1426        TransactionId key = TransactionIdConversion.convert(command.getTransactionInfo());
1427        List<Operation> updates = null;
1428        synchronized (inflightTransactions) {
1429            updates = inflightTransactions.remove(key);
1430            if (updates == null) {
1431                updates = preparedTransactions.remove(key);
1432            }
1433        }
1434    }
1435
1436    protected void process(KahaRewrittenDataFileCommand command, Location location)  throws IOException {
1437        final TreeSet<Integer> completeFileSet = new TreeSet<>(journal.getFileMap().keySet());
1438
1439        // Mark the current journal file as a compacted file so that gc checks can skip
1440        // over logs that are smaller compaction type logs.
1441        DataFile current = journal.getDataFileById(location.getDataFileId());
1442        current.setTypeCode(command.getRewriteType());
1443
1444        if (completeFileSet.contains(command.getSourceDataFileId()) && command.getSkipIfSourceExists()) {
1445            // Move offset so that next location read jumps to next file.
1446            location.setOffset(journalMaxFileLength);
1447        }
1448    }
1449
1450    // /////////////////////////////////////////////////////////////////
1451    // These methods do the actual index updates.
1452    // /////////////////////////////////////////////////////////////////
1453
1454    protected final ReentrantReadWriteLock indexLock = new ReentrantReadWriteLock();
1455    private final HashSet<Integer> journalFilesBeingReplicated = new HashSet<>();
1456
    /**
     * Applies a message add to the destination's indexes and returns the
     * assigned sequence id, or -1 when the add is skipped (topic with no
     * subscriptions) or rejected as a duplicate/replay.
     */
    long updateIndex(Transaction tx, KahaAddMessageCommand command, Location location) throws IOException {
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);

        // Skip adding the message to the index if this is a topic and there are
        // no subscriptions.
        if (sd.subscriptions != null && sd.subscriptions.isEmpty(tx)) {
            return -1;
        }

        // Add the message.
        int priority = command.getPrioritySupported() ? command.getPriority() : javax.jms.Message.DEFAULT_PRIORITY;
        long id = sd.orderIndex.getNextMessageId();
        Long previous = sd.locationIndex.put(tx, location, id);
        if (previous == null) {
            previous = sd.messageIdIndex.put(tx, command.getMessageId(), id);
            if (previous == null) {
                // First time this message is seen: record it in the order index,
                // update store size stats, and seed per-subscription ack state.
                incrementAndAddSizeToStoreStat(command.getDestination(), location.getSize());
                sd.orderIndex.put(tx, priority, id, new MessageKeys(command.getMessageId(), location));
                if (sd.subscriptions != null && !sd.subscriptions.isEmpty(tx)) {
                    addAckLocationForNewMessage(tx, command.getDestination(), sd, id);
                }
                metadata.lastUpdate = location;
            } else {

                MessageKeys messageKeys = sd.orderIndex.get(tx, previous);
                if (messageKeys != null && messageKeys.location.compareTo(location) < 0) {
                    // If the message ID is indexed, then the broker asked us to store a duplicate before the message was dispatched and acked, we ignore this add attempt
                    LOG.warn("Duplicate message add attempt rejected. Destination: {}://{}, Message id: {}", command.getDestination().getType(), command.getDestination().getName(), command.getMessageId());
                }
                // Undo the tentative index entries created above for the duplicate add.
                sd.messageIdIndex.put(tx, command.getMessageId(), previous);
                sd.locationIndex.remove(tx, location);
                id = -1;
            }
        } else {
            // restore the previous value.. Looks like this was a redo of a previously
            // added message. We don't want to assign it a new id as the other indexes would
            // be wrong..
            sd.locationIndex.put(tx, location, previous);
            // ensure sequence is not broken
            sd.orderIndex.revertNextMessageId();
            metadata.lastUpdate = location;
        }
        // record this id in any event, initial send or recovery
        metadata.producerSequenceIdTracker.isDuplicate(command.getMessageId());

       return id;
    }
1504
1505    void trackPendingAdd(KahaDestination destination, Long seq) {
1506        StoredDestination sd = storedDestinations.get(key(destination));
1507        if (sd != null) {
1508            sd.trackPendingAdd(seq);
1509        }
1510    }
1511
1512    void trackPendingAddComplete(KahaDestination destination, Long seq) {
1513        StoredDestination sd = storedDestinations.get(key(destination));
1514        if (sd != null) {
1515            sd.trackPendingAddComplete(seq);
1516        }
1517    }
1518
    /**
     * Applies an in-place message update. If the message id is not in the
     * index, the update degrades to a plain add of the embedded message.
     */
    void updateIndex(Transaction tx, KahaUpdateMessageCommand updateMessageCommand, Location location) throws IOException {
        KahaAddMessageCommand command = updateMessageCommand.getMessage();
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);

        Long id = sd.messageIdIndex.get(tx, command.getMessageId());
        if (id != null) {
            // Overwrite the order-index entry for the existing sequence id with
            // the new journal location; the old MessageKeys comes back for cleanup.
            MessageKeys previousKeys = sd.orderIndex.put(
                    tx,
                    command.getPrioritySupported() ? command.getPriority() : javax.jms.Message.DEFAULT_PRIORITY,
                    id,
                    new MessageKeys(command.getMessageId(), location)
            );
            sd.locationIndex.put(tx, location, id);
            incrementAndAddSizeToStoreStat(command.getDestination(), location.getSize());

            if (previousKeys != null) {
                //Remove the existing from the size
                decrementAndSubSizeToStoreStat(command.getDestination(), previousKeys.location.getSize());

                //update all the subscription metrics
                if (enableSubscriptionStatistics && sd.ackPositions != null && location.getSize() != previousKeys.location.getSize()) {
                    // Only subscriptions still holding this sequence id pending are adjusted.
                    Iterator<Entry<String, SequenceSet>> iter = sd.ackPositions.iterator(tx);
                    while (iter.hasNext()) {
                        Entry<String, SequenceSet> e = iter.next();
                        if (e.getValue().contains(id)) {
                            incrementAndAddSizeToStoreStat(key(command.getDestination()), e.getKey(), location.getSize());
                            decrementAndSubSizeToStoreStat(key(command.getDestination()), e.getKey(), previousKeys.location.getSize());
                        }
                    }
                }

                // on first update previous is original location, on recovery/replay it may be the updated location
                if(!previousKeys.location.equals(location)) {
                    sd.locationIndex.remove(tx, previousKeys.location);
                }
            }
            metadata.lastUpdate = location;
        } else {
            //Add the message if it can't be found
            this.updateIndex(tx, command, location);
        }
    }
1561
    /**
     * Applies a message ack/remove. Queue messages are removed from all indexes
     * immediately; topic messages are only fully removed once every
     * subscription has acked them (delegated to removeAckLocation).
     */
    void updateIndex(Transaction tx, KahaRemoveMessageCommand command, Location ackLocation) throws IOException {
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
        if (!command.hasSubscriptionKey()) {

            // In the queue case we just remove the message from the index..
            Long sequenceId = sd.messageIdIndex.remove(tx, command.getMessageId());
            if (sequenceId != null) {
                MessageKeys keys = sd.orderIndex.remove(tx, sequenceId);
                if (keys != null) {
                    sd.locationIndex.remove(tx, keys.location);
                    decrementAndSubSizeToStoreStat(command.getDestination(), keys.location.getSize());
                    // Remember that the ack's journal file references the message's
                    // journal file so GC keeps the message file until the ack file goes.
                    recordAckMessageReferenceLocation(ackLocation, keys.location);
                    metadata.lastUpdate = ackLocation;
                }  else if (LOG.isDebugEnabled()) {
                    LOG.debug("message not found in order index: " + sequenceId  + " for: " + command.getMessageId());
                }
            } else if (LOG.isDebugEnabled()) {
                LOG.debug("message not found in sequence id index: " + command.getMessageId());
            }
        } else {
            // In the topic case we need remove the message once it's been acked
            // by all the subs
            Long sequence = sd.messageIdIndex.get(tx, command.getMessageId());

            // Make sure it's a valid message id...
            if (sequence != null) {
                String subscriptionKey = command.getSubscriptionKey();
                if (command.getAck() != UNMATCHED) {
                    // Load the entry first so lastGetPriority() reflects this sequence
                    // before recording the subscription's last ack.
                    sd.orderIndex.get(tx, sequence);
                    byte priority = sd.orderIndex.lastGetPriority();
                    sd.subscriptionAcks.put(tx, subscriptionKey, new LastAck(sequence, priority));
                }

                MessageKeys keys = sd.orderIndex.get(tx, sequence);
                if (keys != null) {
                    recordAckMessageReferenceLocation(ackLocation, keys.location);
                }
                // The following method handles deleting un-referenced messages.
                removeAckLocation(command, tx, sd, subscriptionKey, sequence);
                metadata.lastUpdate = ackLocation;
            } else if (LOG.isDebugEnabled()) {
                LOG.debug("on ack, no message sequence exists for id: " + command.getMessageId() + " and sub: " + command.getSubscriptionKey());
            }

        }
    }
1608
1609    private void recordAckMessageReferenceLocation(Location ackLocation, Location messageLocation) {
1610        Set<Integer> referenceFileIds = metadata.ackMessageFileMap.get(Integer.valueOf(ackLocation.getDataFileId()));
1611        if (referenceFileIds == null) {
1612            referenceFileIds = new HashSet<>();
1613            referenceFileIds.add(messageLocation.getDataFileId());
1614            metadata.ackMessageFileMap.put(ackLocation.getDataFileId(), referenceFileIds);
1615        } else {
1616            Integer id = Integer.valueOf(messageLocation.getDataFileId());
1617            if (!referenceFileIds.contains(id)) {
1618                referenceFileIds.add(id);
1619            }
1620        }
1621    }
1622
1623    void updateIndex(Transaction tx, KahaRemoveDestinationCommand command, Location location) throws IOException {
1624        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
1625        sd.orderIndex.remove(tx);
1626
1627        sd.locationIndex.clear(tx);
1628        sd.locationIndex.unload(tx);
1629        tx.free(sd.locationIndex.getPageId());
1630
1631        sd.messageIdIndex.clear(tx);
1632        sd.messageIdIndex.unload(tx);
1633        tx.free(sd.messageIdIndex.getPageId());
1634
1635        if (sd.subscriptions != null) {
1636            sd.subscriptions.clear(tx);
1637            sd.subscriptions.unload(tx);
1638            tx.free(sd.subscriptions.getPageId());
1639
1640            sd.subscriptionAcks.clear(tx);
1641            sd.subscriptionAcks.unload(tx);
1642            tx.free(sd.subscriptionAcks.getPageId());
1643
1644            sd.ackPositions.clear(tx);
1645            sd.ackPositions.unload(tx);
1646            tx.free(sd.ackPositions.getHeadPageId());
1647
1648            sd.subLocations.clear(tx);
1649            sd.subLocations.unload(tx);
1650            tx.free(sd.subLocations.getHeadPageId());
1651        }
1652
1653        String key = key(command.getDestination());
1654        storedDestinations.remove(key);
1655        metadata.destinations.remove(tx, key);
1656        clearStoreStats(command.getDestination());
1657        storeCache.remove(key(command.getDestination()));
1658    }
1659
    /**
     * Creates or destroys a durable subscription. A command carrying
     * subscription info is a create (idempotent on journal replay); one without
     * is a destroy, which also removes the whole destination when the last
     * subscription goes away.
     */
    void updateIndex(Transaction tx, KahaSubscriptionCommand command, Location location) throws IOException {
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
        final String subscriptionKey = command.getSubscriptionKey();

        // If set then we are creating it.. otherwise we are destroying the sub
        if (command.hasSubscriptionInfo()) {
            Location existing = sd.subLocations.get(tx, subscriptionKey);
            if (existing != null && existing.compareTo(location) == 0) {
                // replay on recovery, ignore
                LOG.trace("ignoring journal replay of replay of sub from: " + location);
                return;
            }

            sd.subscriptions.put(tx, subscriptionKey, command);
            sd.subLocations.put(tx, subscriptionKey, location);
            long ackLocation=NOT_ACKED;
            if (!command.getRetroactive()) {
                // Non-retroactive subs start acked up to the current head of the order index.
                ackLocation = sd.orderIndex.nextMessageId-1;
            } else {
                addAckLocationForRetroactiveSub(tx, sd, subscriptionKey);
            }
            sd.subscriptionAcks.put(tx, subscriptionKey, new LastAck(ackLocation));
            sd.subscriptionCache.add(subscriptionKey);
        } else {
            // delete the sub...
            sd.subscriptions.remove(tx, subscriptionKey);
            sd.subLocations.remove(tx, subscriptionKey);
            sd.subscriptionAcks.remove(tx, subscriptionKey);
            sd.subscriptionCache.remove(subscriptionKey);
            removeAckLocationsForSub(command, tx, sd, subscriptionKey);
            MessageStoreSubscriptionStatistics subStats = getSubStats(key(command.getDestination()));
            if (subStats != null) {
                subStats.removeSubscription(subscriptionKey);
            }

            if (sd.subscriptions.isEmpty(tx)) {
                // remove the stored destination
                KahaRemoveDestinationCommand removeDestinationCommand = new KahaRemoveDestinationCommand();
                removeDestinationCommand.setDestination(command.getDestination());
                updateIndex(tx, removeDestinationCommand, null);
                clearStoreStats(command.getDestination());
            }
        }
    }
1704
1705    private void checkpointUpdate(final boolean cleanup) throws IOException {
1706        checkpointLock.writeLock().lock();
1707        try {
1708            this.indexLock.writeLock().lock();
1709            try {
1710                Set<Integer> filesToGc = pageFile.tx().execute(new Transaction.CallableClosure<Set<Integer>, IOException>() {
1711                    @Override
1712                    public Set<Integer> execute(Transaction tx) throws IOException {
1713                        return checkpointUpdate(tx, cleanup);
1714                    }
1715                });
1716                pageFile.flush();
1717                // after the index update such that partial removal does not leave dangling references in the index.
1718                journal.removeDataFiles(filesToGc);
1719            } finally {
1720                this.indexLock.writeLock().unlock();
1721            }
1722
1723        } finally {
1724            checkpointLock.writeLock().unlock();
1725        }
1726    }
1727
1728    /**
1729     * @param tx
1730     * @throws IOException
1731     */
1732    Set<Integer> checkpointUpdate(Transaction tx, boolean cleanup) throws IOException {
1733        MDC.put("activemq.persistenceDir", getDirectory().getName());
1734        LOG.debug("Checkpoint started.");
1735
1736        // reflect last update exclusive of current checkpoint
1737        Location lastUpdate = metadata.lastUpdate;
1738
1739        metadata.state = OPEN_STATE;
1740        metadata.producerSequenceIdTrackerLocation = checkpointProducerAudit();
1741        metadata.ackMessageFileMapLocation = checkpointAckMessageFileMap();
1742        Location[] inProgressTxRange = getInProgressTxLocationRange();
1743        metadata.firstInProgressTransactionLocation = inProgressTxRange[0];
1744        tx.store(metadata.page, metadataMarshaller, true);
1745
1746        final TreeSet<Integer> gcCandidateSet = new TreeSet<>();
1747        if (cleanup) {
1748
1749            final TreeSet<Integer> completeFileSet = new TreeSet<>(journal.getFileMap().keySet());
1750            gcCandidateSet.addAll(completeFileSet);
1751
1752            if (LOG.isTraceEnabled()) {
1753                LOG.trace("Last update: " + lastUpdate + ", full gc candidates set: " + gcCandidateSet);
1754            }
1755
1756            if (lastUpdate != null) {
1757                // we won't delete past the last update, ackCompaction journal can be a candidate in error
1758                gcCandidateSet.removeAll(new TreeSet<Integer>(gcCandidateSet.tailSet(lastUpdate.getDataFileId())));
1759            }
1760
1761            // Don't GC files under replication
1762            if( journalFilesBeingReplicated!=null ) {
1763                gcCandidateSet.removeAll(journalFilesBeingReplicated);
1764            }
1765
1766            if (metadata.producerSequenceIdTrackerLocation != null) {
1767                int dataFileId = metadata.producerSequenceIdTrackerLocation.getDataFileId();
1768                if (gcCandidateSet.contains(dataFileId) && gcCandidateSet.first() == dataFileId) {
1769                    // rewrite so we don't prevent gc
1770                    metadata.producerSequenceIdTracker.setModified(true);
1771                    if (LOG.isTraceEnabled()) {
1772                        LOG.trace("rewriting producerSequenceIdTracker:" + metadata.producerSequenceIdTrackerLocation);
1773                    }
1774                }
1775                gcCandidateSet.remove(dataFileId);
1776                if (LOG.isTraceEnabled()) {
1777                    LOG.trace("gc candidates after producerSequenceIdTrackerLocation:" + metadata.producerSequenceIdTrackerLocation + ", " + gcCandidateSet);
1778                }
1779            }
1780
1781            if (metadata.ackMessageFileMapLocation != null) {
1782                int dataFileId = metadata.ackMessageFileMapLocation.getDataFileId();
1783                gcCandidateSet.remove(dataFileId);
1784                if (LOG.isTraceEnabled()) {
1785                    LOG.trace("gc candidates after ackMessageFileMapLocation:" + metadata.ackMessageFileMapLocation + ", " + gcCandidateSet);
1786                }
1787            }
1788
1789            // Don't GC files referenced by in-progress tx
1790            if (inProgressTxRange[0] != null) {
1791                for (int pendingTx=inProgressTxRange[0].getDataFileId(); pendingTx <= inProgressTxRange[1].getDataFileId(); pendingTx++) {
1792                    gcCandidateSet.remove(pendingTx);
1793                }
1794            }
1795            if (LOG.isTraceEnabled()) {
1796                LOG.trace("gc candidates after in progress tx range:" + Arrays.asList(inProgressTxRange) + ", " + gcCandidateSet);
1797            }
1798
1799            // Go through all the destinations to see if any of them can remove GC candidates.
1800            for (Entry<String, StoredDestination> entry : storedDestinations.entrySet()) {
1801                if( gcCandidateSet.isEmpty() ) {
1802                    break;
1803                }
1804
1805                // Use a visitor to cut down the number of pages that we load
1806                entry.getValue().locationIndex.visit(tx, new BTreeVisitor<Location, Long>() {
1807                    int last=-1;
1808                    @Override
1809                    public boolean isInterestedInKeysBetween(Location first, Location second) {
1810                        if( first==null ) {
1811                            SortedSet<Integer> subset = gcCandidateSet.headSet(second.getDataFileId()+1);
1812                            if( !subset.isEmpty() && subset.last() == second.getDataFileId() ) {
1813                                subset.remove(second.getDataFileId());
1814                            }
1815                            return !subset.isEmpty();
1816                        } else if( second==null ) {
1817                            SortedSet<Integer> subset = gcCandidateSet.tailSet(first.getDataFileId());
1818                            if( !subset.isEmpty() && subset.first() == first.getDataFileId() ) {
1819                                subset.remove(first.getDataFileId());
1820                            }
1821                            return !subset.isEmpty();
1822                        } else {
1823                            SortedSet<Integer> subset = gcCandidateSet.subSet(first.getDataFileId(), second.getDataFileId()+1);
1824                            if( !subset.isEmpty() && subset.first() == first.getDataFileId() ) {
1825                                subset.remove(first.getDataFileId());
1826                            }
1827                            if( !subset.isEmpty() && subset.last() == second.getDataFileId() ) {
1828                                subset.remove(second.getDataFileId());
1829                            }
1830                            return !subset.isEmpty();
1831                        }
1832                    }
1833
1834                    @Override
1835                    public void visit(List<Location> keys, List<Long> values) {
1836                        for (Location l : keys) {
1837                            int fileId = l.getDataFileId();
1838                            if( last != fileId ) {
1839                                gcCandidateSet.remove(fileId);
1840                                last = fileId;
1841                            }
1842                        }
1843                    }
1844                });
1845
1846                // Durable Subscription
1847                if (entry.getValue().subLocations != null) {
1848                    Iterator<Entry<String, Location>> iter = entry.getValue().subLocations.iterator(tx);
1849                    while (iter.hasNext()) {
1850                        Entry<String, Location> subscription = iter.next();
1851                        int dataFileId = subscription.getValue().getDataFileId();
1852
1853                        // Move subscription along if it has no outstanding messages that need ack'd
1854                        // and its in the last log file in the journal.
1855                        if (!gcCandidateSet.isEmpty() && gcCandidateSet.first() == dataFileId) {
1856                            final StoredDestination destination = entry.getValue();
1857                            final String subscriptionKey = subscription.getKey();
1858                            SequenceSet pendingAcks = destination.ackPositions.get(tx, subscriptionKey);
1859
1860                            // When pending is size one that is the next message Id meaning there
1861                            // are no pending messages currently.
1862                            if (pendingAcks == null || pendingAcks.isEmpty() ||
1863                                (pendingAcks.size() == 1 && pendingAcks.getTail().range() == 1)) {
1864
1865                                if (LOG.isTraceEnabled()) {
1866                                    LOG.trace("Found candidate for rewrite: {} from file {}", entry.getKey(), dataFileId);
1867                                }
1868
1869                                final KahaSubscriptionCommand kahaSub =
1870                                    destination.subscriptions.get(tx, subscriptionKey);
1871                                destination.subLocations.put(
1872                                    tx, subscriptionKey, checkpointSubscriptionCommand(kahaSub));
1873
1874                                // Skips the remove from candidates if we rewrote the subscription
1875                                // in order to prevent duplicate subscription commands on recover.
1876                                // If another subscription is on the same file and isn't rewritten
1877                                // than it will remove the file from the set.
1878                                continue;
1879                            }
1880                        }
1881
1882                        gcCandidateSet.remove(dataFileId);
1883                    }
1884                }
1885
1886                if (LOG.isTraceEnabled()) {
1887                    LOG.trace("gc candidates after dest:" + entry.getKey() + ", " + gcCandidateSet);
1888                }
1889            }
1890
1891            // check we are not deleting file with ack for in-use journal files
1892            if (LOG.isTraceEnabled()) {
1893                LOG.trace("gc candidates: " + gcCandidateSet);
1894                LOG.trace("ackMessageFileMap: " +  metadata.ackMessageFileMap);
1895            }
1896
1897            boolean ackMessageFileMapMod = false;
1898            Iterator<Integer> candidates = gcCandidateSet.iterator();
1899            while (candidates.hasNext()) {
1900                Integer candidate = candidates.next();
1901                Set<Integer> referencedFileIds = metadata.ackMessageFileMap.get(candidate);
1902                if (referencedFileIds != null) {
1903                    for (Integer referencedFileId : referencedFileIds) {
1904                        if (completeFileSet.contains(referencedFileId) && !gcCandidateSet.contains(referencedFileId)) {
1905                            // active file that is not targeted for deletion is referenced so don't delete
1906                            candidates.remove();
1907                            break;
1908                        }
1909                    }
1910                    if (gcCandidateSet.contains(candidate)) {
1911                        ackMessageFileMapMod |= (metadata.ackMessageFileMap.remove(candidate) != null);
1912                    } else {
1913                        if (LOG.isTraceEnabled()) {
1914                            LOG.trace("not removing data file: " + candidate
1915                                    + " as contained ack(s) refer to referenced file: " + referencedFileIds);
1916                        }
1917                    }
1918                }
1919            }
1920
1921            if (!gcCandidateSet.isEmpty()) {
1922                LOG.debug("Cleanup removing the data files: {}", gcCandidateSet);
1923                for (Integer candidate : gcCandidateSet) {
1924                    for (Set<Integer> ackFiles : metadata.ackMessageFileMap.values()) {
1925                        ackMessageFileMapMod |= ackFiles.remove(candidate);
1926                    }
1927                }
1928                if (ackMessageFileMapMod) {
1929                    checkpointUpdate(tx, false);
1930                }
1931            } else if (isEnableAckCompaction()) {
1932                if (++checkPointCyclesWithNoGC >= getCompactAcksAfterNoGC()) {
1933                    // First check length of journal to make sure it makes sense to even try.
1934                    //
1935                    // If there is only one journal file with Acks in it we don't need to move
1936                    // it since it won't be chained to any later logs.
1937                    //
1938                    // If the logs haven't grown since the last time then we need to compact
1939                    // otherwise there seems to still be room for growth and we don't need to incur
1940                    // the overhead.  Depending on configuration this check can be avoided and
1941                    // Ack compaction will run any time the store has not GC'd a journal file in
1942                    // the configured amount of cycles.
1943                    if (metadata.ackMessageFileMap.size() > 1 &&
1944                        (journalLogOnLastCompactionCheck == journal.getCurrentDataFileId() || isCompactAcksIgnoresStoreGrowth())) {
1945
1946                        LOG.trace("No files GC'd checking if threshold to ACK compaction has been met.");
1947                        try {
1948                            scheduler.execute(new AckCompactionRunner());
1949                        } catch (Exception ex) {
1950                            LOG.warn("Error on queueing the Ack Compactor", ex);
1951                        }
1952                    } else {
1953                        LOG.trace("Journal activity detected, no Ack compaction scheduled.");
1954                    }
1955
1956                    checkPointCyclesWithNoGC = 0;
1957                } else {
1958                    LOG.trace("Not yet time to check for compaction: {} of {} cycles",
1959                              checkPointCyclesWithNoGC, getCompactAcksAfterNoGC());
1960                }
1961
1962                journalLogOnLastCompactionCheck = journal.getCurrentDataFileId();
1963            }
1964        }
1965        MDC.remove("activemq.persistenceDir");
1966
1967        LOG.debug("Checkpoint done.");
1968        return gcCandidateSet;
1969    }
1970
1971    private final class AckCompactionRunner implements Runnable {
1972
1973        @Override
1974        public void run() {
1975
1976            int journalToAdvance = -1;
1977            Set<Integer> journalLogsReferenced = new HashSet<>();
1978
1979            //flag to know whether the ack forwarding completed without an exception
1980            boolean forwarded = false;
1981
1982            try {
1983                //acquire the checkpoint lock to prevent other threads from
1984                //running a checkpoint while this is running
1985                //
1986                //Normally this task runs on the same executor as the checkpoint task
1987                //so this ack compaction runner wouldn't run at the same time as the checkpoint task.
1988                //
1989                //However, there are two cases where this isn't always true.
1990                //First, the checkpoint() method is public and can be called through the
1991                //PersistenceAdapter interface by someone at the same time this is running.
1992                //Second, a checkpoint is called during shutdown without using the executor.
1993                //
1994                //In the future it might be better to just remove the checkpointLock entirely
1995                //and only use the executor but this would need to be examined for any unintended
1996                //consequences
1997                checkpointLock.readLock().lock();
1998
1999                try {
2000
2001                    // Lock index to capture the ackMessageFileMap data
2002                    indexLock.writeLock().lock();
2003
2004                    // Map keys might not be sorted, find the earliest log file to forward acks
2005                    // from and move only those, future cycles can chip away at more as needed.
2006                    // We won't move files that are themselves rewritten on a previous compaction.
2007                    List<Integer> journalFileIds = new ArrayList<>(metadata.ackMessageFileMap.keySet());
2008                    Collections.sort(journalFileIds);
2009                    for (Integer journalFileId : journalFileIds) {
2010                        DataFile current = journal.getDataFileById(journalFileId);
2011                        if (current != null && current.getTypeCode() != COMPACTED_JOURNAL_FILE) {
2012                            journalToAdvance = journalFileId;
2013                            break;
2014                        }
2015                    }
2016
2017                    // Check if we found one, or if we only found the current file being written to.
2018                    if (journalToAdvance == -1 || journalToAdvance == journal.getCurrentDataFileId()) {
2019                        return;
2020                    }
2021
2022                    journalLogsReferenced.addAll(metadata.ackMessageFileMap.get(journalToAdvance));
2023
2024                } finally {
2025                    indexLock.writeLock().unlock();
2026                }
2027
2028                try {
2029                    // Background rewrite of the old acks
2030                    forwardAllAcks(journalToAdvance, journalLogsReferenced);
2031                    forwarded = true;
2032                } catch (IOException ioe) {
2033                    LOG.error("Forwarding of acks failed", ioe);
2034                    brokerService.handleIOException(ioe);
2035                } catch (Throwable e) {
2036                    LOG.error("Forwarding of acks failed", e);
2037                    brokerService.handleIOException(IOExceptionSupport.create(e));
2038                }
2039            } finally {
2040                checkpointLock.readLock().unlock();
2041            }
2042
2043            try {
2044                if (forwarded) {
2045                    // Checkpoint with changes from the ackMessageFileMap
2046                    checkpointUpdate(false);
2047                }
2048            } catch (IOException ioe) {
2049                LOG.error("Checkpoint failed", ioe);
2050                brokerService.handleIOException(ioe);
2051            } catch (Throwable e) {
2052                LOG.error("Checkpoint failed", e);
2053                brokerService.handleIOException(IOExceptionSupport.create(e));
2054            }
2055        }
2056    }
2057
2058    private void forwardAllAcks(Integer journalToRead, Set<Integer> journalLogsReferenced) throws IllegalStateException, IOException {
2059        LOG.trace("Attempting to move all acks in journal:{} to the front.", journalToRead);
2060
2061        DataFile forwardsFile = journal.reserveDataFile();
2062        forwardsFile.setTypeCode(COMPACTED_JOURNAL_FILE);
2063        LOG.trace("Reserved file for forwarded acks: {}", forwardsFile);
2064
2065        Map<Integer, Set<Integer>> updatedAckLocations = new HashMap<>();
2066
2067        try (TargetedDataFileAppender appender = new TargetedDataFileAppender(journal, forwardsFile);) {
2068            KahaRewrittenDataFileCommand compactionMarker = new KahaRewrittenDataFileCommand();
2069            compactionMarker.setSourceDataFileId(journalToRead);
2070            compactionMarker.setRewriteType(forwardsFile.getTypeCode());
2071
2072            ByteSequence payload = toByteSequence(compactionMarker);
2073            appender.storeItem(payload, Journal.USER_RECORD_TYPE, false);
2074            LOG.trace("Marked ack rewrites file as replacing file: {}", journalToRead);
2075
2076            final Location limit = new Location(journalToRead + 1, 0);
2077            Location nextLocation = getNextLocationForAckForward(new Location(journalToRead, 0), limit);
2078            while (nextLocation != null) {
2079                JournalCommand<?> command = null;
2080                try {
2081                    command = load(nextLocation);
2082                } catch (IOException ex) {
2083                    LOG.trace("Error loading command during ack forward: {}", nextLocation);
2084                }
2085
2086                if (command != null && command instanceof KahaRemoveMessageCommand) {
2087                    payload = toByteSequence(command);
2088                    Location location = appender.storeItem(payload, Journal.USER_RECORD_TYPE, false);
2089                    updatedAckLocations.put(location.getDataFileId(), journalLogsReferenced);
2090                }
2091
2092                nextLocation = getNextLocationForAckForward(nextLocation, limit);
2093            }
2094        }
2095
2096        LOG.trace("ACKS forwarded, updates for ack locations: {}", updatedAckLocations);
2097
2098        // Lock index while we update the ackMessageFileMap.
2099        indexLock.writeLock().lock();
2100
2101        // Update the ack map with the new locations of the acks
2102        for (Entry<Integer, Set<Integer>> entry : updatedAckLocations.entrySet()) {
2103            Set<Integer> referenceFileIds = metadata.ackMessageFileMap.get(entry.getKey());
2104            if (referenceFileIds == null) {
2105                referenceFileIds = new HashSet<>();
2106                referenceFileIds.addAll(entry.getValue());
2107                metadata.ackMessageFileMap.put(entry.getKey(), referenceFileIds);
2108            } else {
2109                referenceFileIds.addAll(entry.getValue());
2110            }
2111        }
2112
2113        // remove the old location data from the ack map so that the old journal log file can
2114        // be removed on next GC.
2115        metadata.ackMessageFileMap.remove(journalToRead);
2116
2117        indexLock.writeLock().unlock();
2118
2119        LOG.trace("ACK File Map following updates: {}", metadata.ackMessageFileMap);
2120    }
2121
2122    private Location getNextLocationForAckForward(final Location nextLocation, final Location limit) {
2123        //getNextLocation() can throw an IOException, we should handle it and set
2124        //nextLocation to null and abort gracefully
2125        //Should not happen in the normal case
2126        Location location = null;
2127        try {
2128            location = journal.getNextLocation(nextLocation, limit);
2129        } catch (IOException e) {
2130            LOG.warn("Failed to load next journal location after: {}, reason: {}", nextLocation, e);
2131            if (LOG.isDebugEnabled()) {
2132                LOG.debug("Failed to load next journal location after: {}", nextLocation, e);
2133            }
2134        }
2135        return location;
2136    }
2137
    // No-op completion callback passed to journal store/write calls when the
    // caller only needs the write latch, not a disk-sync notification.
    final Runnable nullCompletionCallback = new Runnable() {
        @Override
        public void run() {
        }
    };
2143
2144    private Location checkpointProducerAudit() throws IOException {
2145        if (metadata.producerSequenceIdTracker == null || metadata.producerSequenceIdTracker.modified()) {
2146            ByteArrayOutputStream baos = new ByteArrayOutputStream();
2147            ObjectOutputStream oout = new ObjectOutputStream(baos);
2148            oout.writeObject(metadata.producerSequenceIdTracker);
2149            oout.flush();
2150            oout.close();
2151            // using completion callback allows a disk sync to be avoided when enableJournalDiskSyncs = false
2152            Location location = store(new KahaProducerAuditCommand().setAudit(new Buffer(baos.toByteArray())), nullCompletionCallback);
2153            try {
2154                location.getLatch().await();
2155                if (location.getException().get() != null) {
2156                    throw location.getException().get();
2157                }
2158            } catch (InterruptedException e) {
2159                throw new InterruptedIOException(e.toString());
2160            }
2161            return location;
2162        }
2163        return metadata.producerSequenceIdTrackerLocation;
2164    }
2165
2166    private Location checkpointAckMessageFileMap() throws IOException {
2167        ByteArrayOutputStream baos = new ByteArrayOutputStream();
2168        ObjectOutputStream oout = new ObjectOutputStream(baos);
2169        oout.writeObject(metadata.ackMessageFileMap);
2170        oout.flush();
2171        oout.close();
2172        // using completion callback allows a disk sync to be avoided when enableJournalDiskSyncs = false
2173        Location location = store(new KahaAckMessageFileMapCommand().setAckMessageFileMap(new Buffer(baos.toByteArray())), nullCompletionCallback);
2174        try {
2175            location.getLatch().await();
2176        } catch (InterruptedException e) {
2177            throw new InterruptedIOException(e.toString());
2178        }
2179        return location;
2180    }
2181
2182    private Location checkpointSubscriptionCommand(KahaSubscriptionCommand subscription) throws IOException {
2183
2184        ByteSequence sequence = toByteSequence(subscription);
2185        Location location = journal.write(sequence, nullCompletionCallback) ;
2186
2187        try {
2188            location.getLatch().await();
2189        } catch (InterruptedException e) {
2190            throw new InterruptedIOException(e.toString());
2191        }
2192        return location;
2193    }
2194
    // NOTE(review): returns the live internal set, not a copy — callers can (and
    // presumably do) mutate it to flag files as being replicated; verify callers
    // before considering a defensive copy.
    public HashSet<Integer> getJournalFilesBeingReplicated() {
        return journalFilesBeingReplicated;
    }
2198
2199    // /////////////////////////////////////////////////////////////////
2200    // StoredDestination related implementation methods.
2201    // /////////////////////////////////////////////////////////////////
2202
    // In-memory cache of loaded destination index structures, keyed by the
    // destination key (see key(KahaDestination)); populated lazily by
    // getStoredDestination.
    protected final HashMap<String, StoredDestination> storedDestinations = new HashMap<>();
2204
2205    static class MessageKeys {
2206        final String messageId;
2207        final Location location;
2208
2209        public MessageKeys(String messageId, Location location) {
2210            this.messageId=messageId;
2211            this.location=location;
2212        }
2213
2214        @Override
2215        public String toString() {
2216            return "["+messageId+","+location+"]";
2217        }
2218    }
2219
2220    protected class MessageKeysMarshaller extends VariableMarshaller<MessageKeys> {
2221        final LocationSizeMarshaller locationSizeMarshaller = new LocationSizeMarshaller();
2222
2223        @Override
2224        public MessageKeys readPayload(DataInput dataIn) throws IOException {
2225            return new MessageKeys(dataIn.readUTF(), locationSizeMarshaller.readPayload(dataIn));
2226        }
2227
2228        @Override
2229        public void writePayload(MessageKeys object, DataOutput dataOut) throws IOException {
2230            dataOut.writeUTF(object.messageId);
2231            locationSizeMarshaller.writePayload(object.location, dataOut);
2232        }
2233    }
2234
2235    class LastAck {
2236        long lastAckedSequence;
2237        byte priority;
2238
2239        public LastAck(LastAck source) {
2240            this.lastAckedSequence = source.lastAckedSequence;
2241            this.priority = source.priority;
2242        }
2243
2244        public LastAck() {
2245            this.priority = MessageOrderIndex.HI;
2246        }
2247
2248        public LastAck(long ackLocation) {
2249            this.lastAckedSequence = ackLocation;
2250            this.priority = MessageOrderIndex.LO;
2251        }
2252
2253        public LastAck(long ackLocation, byte priority) {
2254            this.lastAckedSequence = ackLocation;
2255            this.priority = priority;
2256        }
2257
2258        @Override
2259        public String toString() {
2260            return "[" + lastAckedSequence + ":" + priority + "]";
2261        }
2262    }
2263
2264    protected class LastAckMarshaller implements Marshaller<LastAck> {
2265
2266        @Override
2267        public void writePayload(LastAck object, DataOutput dataOut) throws IOException {
2268            dataOut.writeLong(object.lastAckedSequence);
2269            dataOut.writeByte(object.priority);
2270        }
2271
2272        @Override
2273        public LastAck readPayload(DataInput dataIn) throws IOException {
2274            LastAck lastAcked = new LastAck();
2275            lastAcked.lastAckedSequence = dataIn.readLong();
2276            if (metadata.version >= 3) {
2277                lastAcked.priority = dataIn.readByte();
2278            }
2279            return lastAcked;
2280        }
2281
2282        @Override
2283        public int getFixedSize() {
2284            return 9;
2285        }
2286
2287        @Override
2288        public LastAck deepCopy(LastAck source) {
2289            return new LastAck(source);
2290        }
2291
2292        @Override
2293        public boolean isDeepCopySupported() {
2294            return true;
2295        }
2296    }
2297
    /**
     * Holds all per-destination index structures: the priority-aware message
     * order index, location and message-id lookup indexes, and (for topics)
     * subscription state.
     */
    class StoredDestination {

        // Ordered message index split by priority band.
        MessageOrderIndex orderIndex = new MessageOrderIndex();
        // Location -> sequence id lookup.
        BTreeIndex<Location, Long> locationIndex;
        // Message id -> sequence id lookup.
        BTreeIndex<String, Long> messageIdIndex;

        // These bits are only set for Topics
        BTreeIndex<String, KahaSubscriptionCommand> subscriptions;
        BTreeIndex<String, LastAck> subscriptionAcks;
        HashMap<String, MessageOrderCursor> subscriptionCursors;
        ListIndex<String, SequenceSet> ackPositions;
        ListIndex<String, Location> subLocations;

        // Transient data used to track which Messages are no longer needed.
        final TreeMap<Long, Long> messageReferences = new TreeMap<>();
        final HashSet<String> subscriptionCache = new LinkedHashSet<>();

        // Delegates pending-add tracking to the order index.
        public void trackPendingAdd(Long seq) {
            orderIndex.trackPendingAdd(seq);
        }

        public void trackPendingAddComplete(Long seq) {
            orderIndex.trackPendingAddComplete(seq);
        }

        @Override
        public String toString() {
            return "nextSeq:" + orderIndex.nextMessageId + ",lastRet:" + orderIndex.cursor + ",pending:" + orderIndex.pendingAdditions.size();
        }
    }
2328
    /**
     * Marshals StoredDestination index page ids to/from the metadata page.
     *
     * readPayload also performs in-place store upgrades: the exact order of
     * dataIn.read* calls must mirror writePayload, and version-gated reads must
     * match the layout written by the store version being loaded — do not
     * reorder any of these reads.
     */
    protected class StoredDestinationMarshaller extends VariableMarshaller<StoredDestination> {

        final MessageKeysMarshaller messageKeysMarshaller = new MessageKeysMarshaller();

        @Override
        public StoredDestination readPayload(final DataInput dataIn) throws IOException {
            final StoredDestination value = new StoredDestination();
            value.orderIndex.defaultPriorityIndex = new BTreeIndex<>(pageFile, dataIn.readLong());
            value.locationIndex = new BTreeIndex<>(pageFile, dataIn.readLong());
            value.messageIdIndex = new BTreeIndex<>(pageFile, dataIn.readLong());

            // The boolean flag marks a topic (see writePayload: written true when
            // subscriptions is non-null).
            if (dataIn.readBoolean()) {
                value.subscriptions = new BTreeIndex<>(pageFile, dataIn.readLong());
                value.subscriptionAcks = new BTreeIndex<>(pageFile, dataIn.readLong());
                if (metadata.version >= 4) {
                    value.ackPositions = new ListIndex<>(pageFile, dataIn.readLong());
                } else {
                    // upgrade: pre-v4 stores kept ack positions in a
                    // BTreeIndex<Long, HashSet<String>>; convert to the
                    // ListIndex<String, SequenceSet> layout.
                    pageFile.tx().execute(new Transaction.Closure<IOException>() {
                        @Override
                        public void execute(Transaction tx) throws IOException {
                            LinkedHashMap<String, SequenceSet> temp = new LinkedHashMap<>();

                            if (metadata.version >= 3) {
                                // migrate
                                BTreeIndex<Long, HashSet<String>> oldAckPositions =
                                        new BTreeIndex<>(pageFile, dataIn.readLong());
                                oldAckPositions.setKeyMarshaller(LongMarshaller.INSTANCE);
                                oldAckPositions.setValueMarshaller(HashSetStringMarshaller.INSTANCE);
                                oldAckPositions.load(tx);


                                // Do the initial build of the data in memory before writing into the store
                                // based Ack Positions List to avoid a lot of disk thrashing.
                                Iterator<Entry<Long, HashSet<String>>> iterator = oldAckPositions.iterator(tx);
                                while (iterator.hasNext()) {
                                    Entry<Long, HashSet<String>> entry = iterator.next();

                                    for(String subKey : entry.getValue()) {
                                        SequenceSet pendingAcks = temp.get(subKey);
                                        if (pendingAcks == null) {
                                            pendingAcks = new SequenceSet();
                                            temp.put(subKey, pendingAcks);
                                        }

                                        pendingAcks.add(entry.getKey());
                                    }
                                }
                            }
                            // Now move the pending messages to ack data into the store backed
                            // structure.
                            value.ackPositions = new ListIndex<>(pageFile, tx.allocate());
                            value.ackPositions.setKeyMarshaller(StringMarshaller.INSTANCE);
                            value.ackPositions.setValueMarshaller(SequenceSet.Marshaller.INSTANCE);
                            value.ackPositions.load(tx);
                            for(String subscriptionKey : temp.keySet()) {
                                value.ackPositions.put(tx, subscriptionKey, temp.get(subscriptionKey));
                            }

                        }
                    });
                }

                if (metadata.version >= 5) {
                    value.subLocations = new ListIndex<>(pageFile, dataIn.readLong());
                } else {
                    // upgrade: pre-v5 stores had no subscription-location list;
                    // allocate an empty one.
                    pageFile.tx().execute(new Transaction.Closure<IOException>() {
                        @Override
                        public void execute(Transaction tx) throws IOException {
                            value.subLocations = new ListIndex<>(pageFile, tx.allocate());
                            value.subLocations.setKeyMarshaller(StringMarshaller.INSTANCE);
                            value.subLocations.setValueMarshaller(LocationMarshaller.INSTANCE);
                            value.subLocations.load(tx);
                        }
                    });
                }
            }
            if (metadata.version >= 2) {
                value.orderIndex.lowPriorityIndex = new BTreeIndex<>(pageFile, dataIn.readLong());
                value.orderIndex.highPriorityIndex = new BTreeIndex<>(pageFile, dataIn.readLong());
            } else {
                // upgrade: pre-v2 stores only had the default priority index;
                // allocate empty low/high priority indexes.
                pageFile.tx().execute(new Transaction.Closure<IOException>() {
                    @Override
                    public void execute(Transaction tx) throws IOException {
                        value.orderIndex.lowPriorityIndex = new BTreeIndex<>(pageFile, tx.allocate());
                        value.orderIndex.lowPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
                        value.orderIndex.lowPriorityIndex.setValueMarshaller(messageKeysMarshaller);
                        value.orderIndex.lowPriorityIndex.load(tx);

                        value.orderIndex.highPriorityIndex = new BTreeIndex<>(pageFile, tx.allocate());
                        value.orderIndex.highPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
                        value.orderIndex.highPriorityIndex.setValueMarshaller(messageKeysMarshaller);
                        value.orderIndex.highPriorityIndex.load(tx);
                    }
                });
            }

            return value;
        }

        @Override
        public void writePayload(StoredDestination value, DataOutput dataOut) throws IOException {
            dataOut.writeLong(value.orderIndex.defaultPriorityIndex.getPageId());
            dataOut.writeLong(value.locationIndex.getPageId());
            dataOut.writeLong(value.messageIdIndex.getPageId());
            if (value.subscriptions != null) {
                dataOut.writeBoolean(true);
                dataOut.writeLong(value.subscriptions.getPageId());
                dataOut.writeLong(value.subscriptionAcks.getPageId());
                dataOut.writeLong(value.ackPositions.getHeadPageId());
                dataOut.writeLong(value.subLocations.getHeadPageId());
            } else {
                dataOut.writeBoolean(false);
            }
            dataOut.writeLong(value.orderIndex.lowPriorityIndex.getPageId());
            dataOut.writeLong(value.orderIndex.highPriorityIndex.getPageId());
        }
    }
2449
2450    static class KahaSubscriptionCommandMarshaller extends VariableMarshaller<KahaSubscriptionCommand> {
2451        final static KahaSubscriptionCommandMarshaller INSTANCE = new KahaSubscriptionCommandMarshaller();
2452
2453        @Override
2454        public KahaSubscriptionCommand readPayload(DataInput dataIn) throws IOException {
2455            KahaSubscriptionCommand rc = new KahaSubscriptionCommand();
2456            rc.mergeFramed((InputStream)dataIn);
2457            return rc;
2458        }
2459
2460        @Override
2461        public void writePayload(KahaSubscriptionCommand object, DataOutput dataOut) throws IOException {
2462            object.writeFramed((OutputStream)dataOut);
2463        }
2464    }
2465
2466    protected StoredDestination getStoredDestination(KahaDestination destination, Transaction tx) throws IOException {
2467        String key = key(destination);
2468        StoredDestination rc = storedDestinations.get(key);
2469        if (rc == null) {
2470            boolean topic = destination.getType() == KahaDestination.DestinationType.TOPIC || destination.getType() == KahaDestination.DestinationType.TEMP_TOPIC;
2471            rc = loadStoredDestination(tx, key, topic);
2472            // Cache it. We may want to remove/unload destinations from the
2473            // cache that are not used for a while
2474            // to reduce memory usage.
2475            storedDestinations.put(key, rc);
2476        }
2477        return rc;
2478    }
2479
2480    protected StoredDestination getExistingStoredDestination(KahaDestination destination, Transaction tx) throws IOException {
2481        String key = key(destination);
2482        StoredDestination rc = storedDestinations.get(key);
2483        if (rc == null && metadata.destinations.containsKey(tx, key)) {
2484            rc = getStoredDestination(destination, tx);
2485        }
2486        return rc;
2487    }
2488
2489    /**
2490     * @param tx
2491     * @param key
2492     * @param topic
2493     * @return
2494     * @throws IOException
2495     */
2496    private StoredDestination loadStoredDestination(Transaction tx, String key, boolean topic) throws IOException {
2497        // Try to load the existing indexes..
2498        StoredDestination rc = metadata.destinations.get(tx, key);
2499        if (rc == null) {
2500            // Brand new destination.. allocate indexes for it.
2501            rc = new StoredDestination();
2502            rc.orderIndex.allocate(tx);
2503            rc.locationIndex = new BTreeIndex<>(pageFile, tx.allocate());
2504            rc.messageIdIndex = new BTreeIndex<>(pageFile, tx.allocate());
2505
2506            if (topic) {
2507                rc.subscriptions = new BTreeIndex<>(pageFile, tx.allocate());
2508                rc.subscriptionAcks = new BTreeIndex<>(pageFile, tx.allocate());
2509                rc.ackPositions = new ListIndex<>(pageFile, tx.allocate());
2510                rc.subLocations = new ListIndex<>(pageFile, tx.allocate());
2511            }
2512            metadata.destinations.put(tx, key, rc);
2513        }
2514
2515        // Configure the marshalers and load.
2516        rc.orderIndex.load(tx);
2517
2518        // Figure out the next key using the last entry in the destination.
2519        rc.orderIndex.configureLast(tx);
2520
2521        rc.locationIndex.setKeyMarshaller(new LocationSizeMarshaller());
2522        rc.locationIndex.setValueMarshaller(LongMarshaller.INSTANCE);
2523        rc.locationIndex.load(tx);
2524
2525        rc.messageIdIndex.setKeyMarshaller(StringMarshaller.INSTANCE);
2526        rc.messageIdIndex.setValueMarshaller(LongMarshaller.INSTANCE);
2527        rc.messageIdIndex.load(tx);
2528
2529        //go through an upgrade old index if older than version 6
2530        if (metadata.version < 6) {
2531            for (Iterator<Entry<Location, Long>> iterator = rc.locationIndex.iterator(tx); iterator.hasNext(); ) {
2532                Entry<Location, Long> entry = iterator.next();
2533                // modify so it is upgraded
2534                rc.locationIndex.put(tx, entry.getKey(), entry.getValue());
2535            }
2536            //upgrade the order index
2537            for (Iterator<Entry<Long, MessageKeys>> iterator = rc.orderIndex.iterator(tx); iterator.hasNext(); ) {
2538                Entry<Long, MessageKeys> entry = iterator.next();
2539                //call get so that the last priority is updated
2540                rc.orderIndex.get(tx, entry.getKey());
2541                rc.orderIndex.put(tx, rc.orderIndex.lastGetPriority(), entry.getKey(), entry.getValue());
2542            }
2543        }
2544
2545        // If it was a topic...
2546        if (topic) {
2547
2548            rc.subscriptions.setKeyMarshaller(StringMarshaller.INSTANCE);
2549            rc.subscriptions.setValueMarshaller(KahaSubscriptionCommandMarshaller.INSTANCE);
2550            rc.subscriptions.load(tx);
2551
2552            rc.subscriptionAcks.setKeyMarshaller(StringMarshaller.INSTANCE);
2553            rc.subscriptionAcks.setValueMarshaller(new LastAckMarshaller());
2554            rc.subscriptionAcks.load(tx);
2555
2556            rc.ackPositions.setKeyMarshaller(StringMarshaller.INSTANCE);
2557            rc.ackPositions.setValueMarshaller(SequenceSet.Marshaller.INSTANCE);
2558            rc.ackPositions.load(tx);
2559
2560            rc.subLocations.setKeyMarshaller(StringMarshaller.INSTANCE);
2561            rc.subLocations.setValueMarshaller(LocationMarshaller.INSTANCE);
2562            rc.subLocations.load(tx);
2563
2564            rc.subscriptionCursors = new HashMap<>();
2565
2566            if (metadata.version < 3) {
2567
2568                // on upgrade need to fill ackLocation with available messages past last ack
2569                for (Iterator<Entry<String, LastAck>> iterator = rc.subscriptionAcks.iterator(tx); iterator.hasNext(); ) {
2570                    Entry<String, LastAck> entry = iterator.next();
2571                    for (Iterator<Entry<Long, MessageKeys>> orderIterator =
2572                            rc.orderIndex.iterator(tx, new MessageOrderCursor(entry.getValue().lastAckedSequence)); orderIterator.hasNext(); ) {
2573                        Long sequence = orderIterator.next().getKey();
2574                        addAckLocation(tx, rc, sequence, entry.getKey());
2575                    }
2576                    // modify so it is upgraded
2577                    rc.subscriptionAcks.put(tx, entry.getKey(), entry.getValue());
2578                }
2579            }
2580
2581            // Configure the message references index
2582            Iterator<Entry<String, SequenceSet>> subscriptions = rc.ackPositions.iterator(tx);
2583            while (subscriptions.hasNext()) {
2584                Entry<String, SequenceSet> subscription = subscriptions.next();
2585                SequenceSet pendingAcks = subscription.getValue();
2586                if (pendingAcks != null && !pendingAcks.isEmpty()) {
2587                    Long lastPendingAck = pendingAcks.getTail().getLast();
2588                    for (Long sequenceId : pendingAcks) {
2589                        Long current = rc.messageReferences.get(sequenceId);
2590                        if (current == null) {
2591                            current = new Long(0);
2592                        }
2593
2594                        // We always add a trailing empty entry for the next position to start from
2595                        // so we need to ensure we don't count that as a message reference on reload.
2596                        if (!sequenceId.equals(lastPendingAck)) {
2597                            current = current.longValue() + 1;
2598                        } else {
2599                            current = Long.valueOf(0L);
2600                        }
2601
2602                        rc.messageReferences.put(sequenceId, current);
2603                    }
2604                }
2605            }
2606
2607            // Configure the subscription cache
2608            for (Iterator<Entry<String, LastAck>> iterator = rc.subscriptionAcks.iterator(tx); iterator.hasNext(); ) {
2609                Entry<String, LastAck> entry = iterator.next();
2610                rc.subscriptionCache.add(entry.getKey());
2611            }
2612
2613            if (rc.orderIndex.nextMessageId == 0) {
2614                // check for existing durable sub all acked out - pull next seq from acks as messages are gone
2615                if (!rc.subscriptionAcks.isEmpty(tx)) {
2616                    for (Iterator<Entry<String, LastAck>> iterator = rc.subscriptionAcks.iterator(tx); iterator.hasNext();) {
2617                        Entry<String, LastAck> entry = iterator.next();
2618                        rc.orderIndex.nextMessageId =
2619                                Math.max(rc.orderIndex.nextMessageId, entry.getValue().lastAckedSequence +1);
2620                    }
2621                }
2622            } else {
2623                // update based on ackPositions for unmatched, last entry is always the next
2624                if (!rc.messageReferences.isEmpty()) {
2625                    Long nextMessageId = (Long) rc.messageReferences.keySet().toArray()[rc.messageReferences.size() - 1];
2626                    rc.orderIndex.nextMessageId =
2627                            Math.max(rc.orderIndex.nextMessageId, nextMessageId);
2628                }
2629            }
2630        }
2631
2632        if (metadata.version < VERSION) {
2633            // store again after upgrade
2634            metadata.destinations.put(tx, key, rc);
2635        }
2636        return rc;
2637    }
2638
2639    /**
2640     * Clear the counter for the destination, if one exists.
2641     *
2642     * @param kahaDestination
2643     */
2644    protected void clearStoreStats(KahaDestination kahaDestination) {
2645        String key = key(kahaDestination);
2646        MessageStoreStatistics storeStats = getStoreStats(key);
2647        MessageStoreSubscriptionStatistics subStats = getSubStats(key);
2648        if (storeStats != null) {
2649            storeStats.reset();
2650        }
2651        if (subStats != null) {
2652            subStats.reset();
2653        }
2654    }
2655
2656    /**
2657     * Update MessageStoreStatistics
2658     *
2659     * @param kahaDestination
2660     * @param size
2661     */
2662    protected void incrementAndAddSizeToStoreStat(KahaDestination kahaDestination, long size) {
2663        incrementAndAddSizeToStoreStat(key(kahaDestination), size);
2664    }
2665
2666    protected void incrementAndAddSizeToStoreStat(String kahaDestKey, long size) {
2667        MessageStoreStatistics storeStats = getStoreStats(kahaDestKey);
2668        if (storeStats != null) {
2669            storeStats.getMessageCount().increment();
2670            if (size > 0) {
2671                storeStats.getMessageSize().addSize(size);
2672            }
2673        }
2674    }
2675
2676    protected void decrementAndSubSizeToStoreStat(KahaDestination kahaDestination, long size) {
2677        decrementAndSubSizeToStoreStat(key(kahaDestination), size);
2678    }
2679
2680    protected void decrementAndSubSizeToStoreStat(String kahaDestKey, long size) {
2681        MessageStoreStatistics storeStats = getStoreStats(kahaDestKey);
2682        if (storeStats != null) {
2683            storeStats.getMessageCount().decrement();
2684            if (size > 0) {
2685                storeStats.getMessageSize().addSize(-size);
2686            }
2687        }
2688    }
2689
2690    protected void incrementAndAddSizeToStoreStat(KahaDestination kahaDestination, String subKey, long size) {
2691        incrementAndAddSizeToStoreStat(key(kahaDestination), subKey, size);
2692    }
2693
2694    protected void incrementAndAddSizeToStoreStat(String kahaDestKey, String subKey, long size) {
2695        if (enableSubscriptionStatistics) {
2696            MessageStoreSubscriptionStatistics subStats = getSubStats(kahaDestKey);
2697            if (subStats != null && subKey != null) {
2698                subStats.getMessageCount(subKey).increment();
2699                if (size > 0) {
2700                    subStats.getMessageSize(subKey).addSize(size);
2701                }
2702            }
2703        }
2704    }
2705
2706
2707    protected void decrementAndSubSizeToStoreStat(String kahaDestKey, String subKey, long size) {
2708        if (enableSubscriptionStatistics) {
2709            MessageStoreSubscriptionStatistics subStats = getSubStats(kahaDestKey);
2710            if (subStats != null && subKey != null) {
2711                subStats.getMessageCount(subKey).decrement();
2712                if (size > 0) {
2713                    subStats.getMessageSize(subKey).addSize(-size);
2714                }
2715            }
2716        }
2717    }
2718
2719    protected void decrementAndSubSizeToStoreStat(KahaDestination kahaDestination, String subKey, long size) {
2720        decrementAndSubSizeToStoreStat(key(kahaDestination), subKey, size);
2721    }
2722
2723    /**
2724     * This is a map to cache MessageStores for a specific
2725     * KahaDestination key
2726     */
2727    protected final ConcurrentMap<String, MessageStore> storeCache =
2728            new ConcurrentHashMap<>();
2729
2730    /**
2731     * Locate the storeMessageSize counter for this KahaDestination
2732     */
2733    protected MessageStoreStatistics getStoreStats(String kahaDestKey) {
2734        MessageStoreStatistics storeStats = null;
2735        try {
2736            MessageStore messageStore = storeCache.get(kahaDestKey);
2737            if (messageStore != null) {
2738                storeStats = messageStore.getMessageStoreStatistics();
2739            }
2740        } catch (Exception e1) {
2741             LOG.error("Getting size counter of destination failed", e1);
2742        }
2743
2744        return storeStats;
2745    }
2746
2747    protected MessageStoreSubscriptionStatistics getSubStats(String kahaDestKey) {
2748        MessageStoreSubscriptionStatistics subStats = null;
2749        try {
2750            MessageStore messageStore = storeCache.get(kahaDestKey);
2751            if (messageStore instanceof TopicMessageStore) {
2752                subStats = ((TopicMessageStore)messageStore).getMessageStoreSubStatistics();
2753            }
2754        } catch (Exception e1) {
2755             LOG.error("Getting size counter of destination failed", e1);
2756        }
2757
2758        return subStats;
2759    }
2760
2761    /**
2762     * Determine whether this Destination matches the DestinationType
2763     *
2764     * @param destination
2765     * @param type
2766     * @return
2767     */
2768    protected boolean matchType(Destination destination,
2769            KahaDestination.DestinationType type) {
2770        if (destination instanceof Topic
2771                && type.equals(KahaDestination.DestinationType.TOPIC)) {
2772            return true;
2773        } else if (destination instanceof Queue
2774                && type.equals(KahaDestination.DestinationType.QUEUE)) {
2775            return true;
2776        }
2777        return false;
2778    }
2779
2780    class LocationSizeMarshaller implements Marshaller<Location> {
2781
2782        public LocationSizeMarshaller() {
2783
2784        }
2785
2786        @Override
2787        public Location readPayload(DataInput dataIn) throws IOException {
2788            Location rc = new Location();
2789            rc.setDataFileId(dataIn.readInt());
2790            rc.setOffset(dataIn.readInt());
2791            if (metadata.version >= 6) {
2792                rc.setSize(dataIn.readInt());
2793            }
2794            return rc;
2795        }
2796
2797        @Override
2798        public void writePayload(Location object, DataOutput dataOut)
2799                throws IOException {
2800            dataOut.writeInt(object.getDataFileId());
2801            dataOut.writeInt(object.getOffset());
2802            dataOut.writeInt(object.getSize());
2803        }
2804
2805        @Override
2806        public int getFixedSize() {
2807            return 12;
2808        }
2809
2810        @Override
2811        public Location deepCopy(Location source) {
2812            return new Location(source);
2813        }
2814
2815        @Override
2816        public boolean isDeepCopySupported() {
2817            return true;
2818        }
2819    }
2820
2821    private void addAckLocation(Transaction tx, StoredDestination sd, Long messageSequence, String subscriptionKey) throws IOException {
2822        SequenceSet sequences = sd.ackPositions.get(tx, subscriptionKey);
2823        if (sequences == null) {
2824            sequences = new SequenceSet();
2825            sequences.add(messageSequence);
2826            sd.ackPositions.add(tx, subscriptionKey, sequences);
2827        } else {
2828            sequences.add(messageSequence);
2829            sd.ackPositions.put(tx, subscriptionKey, sequences);
2830        }
2831
2832        Long count = sd.messageReferences.get(messageSequence);
2833        if (count == null) {
2834            count = Long.valueOf(0L);
2835        }
2836        count = count.longValue() + 1;
2837        sd.messageReferences.put(messageSequence, count);
2838    }
2839
2840    // new sub is interested in potentially all existing messages
2841    private void addAckLocationForRetroactiveSub(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
2842        SequenceSet allOutstanding = new SequenceSet();
2843        Iterator<Map.Entry<String, SequenceSet>> iterator = sd.ackPositions.iterator(tx);
2844        while (iterator.hasNext()) {
2845            SequenceSet set = iterator.next().getValue();
2846            for (Long entry : set) {
2847                allOutstanding.add(entry);
2848            }
2849        }
2850        sd.ackPositions.put(tx, subscriptionKey, allOutstanding);
2851
2852        for (Long ackPosition : allOutstanding) {
2853            Long count = sd.messageReferences.get(ackPosition);
2854
2855            // There might not be a reference if the ackLocation was the last
2856            // one which is a placeholder for the next incoming message and
2857            // no value was added to the message references table.
2858            if (count != null) {
2859                count = count.longValue() + 1;
2860                sd.messageReferences.put(ackPosition, count);
2861            }
2862        }
2863    }
2864
2865    // on a new message add, all existing subs are interested in this message
2866    private void addAckLocationForNewMessage(Transaction tx, KahaDestination kahaDest,
2867            StoredDestination sd, Long messageSequence) throws IOException {
2868        for(String subscriptionKey : sd.subscriptionCache) {
2869            SequenceSet sequences = sd.ackPositions.get(tx, subscriptionKey);
2870            if (sequences == null) {
2871                sequences = new SequenceSet();
2872                sequences.add(new Sequence(messageSequence, messageSequence + 1));
2873                sd.ackPositions.add(tx, subscriptionKey, sequences);
2874            } else {
2875                sequences.add(new Sequence(messageSequence, messageSequence + 1));
2876                sd.ackPositions.put(tx, subscriptionKey, sequences);
2877            }
2878
2879            MessageKeys key = sd.orderIndex.get(tx, messageSequence);
2880            incrementAndAddSizeToStoreStat(kahaDest, subscriptionKey,
2881                    key.location.getSize());
2882
2883            Long count = sd.messageReferences.get(messageSequence);
2884            if (count == null) {
2885                count = Long.valueOf(0L);
2886            }
2887            count = count.longValue() + 1;
2888            sd.messageReferences.put(messageSequence, count);
2889            sd.messageReferences.put(messageSequence + 1, Long.valueOf(0L));
2890        }
2891    }
2892
    /**
     * Remove all ack positions tracked for a subscription that is going away,
     * dropping its reference on each message sequence and deleting from all
     * indexes any message no longer referenced by another subscription.
     */
    private void removeAckLocationsForSub(KahaSubscriptionCommand command,
            Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
        if (!sd.ackPositions.isEmpty(tx)) {
            SequenceSet sequences = sd.ackPositions.remove(tx, subscriptionKey);
            if (sequences == null || sequences.isEmpty()) {
                return;
            }

            // sequences whose reference count dropped to zero below
            ArrayList<Long> unreferenced = new ArrayList<>();

            for(Long sequenceId : sequences) {
                Long references = sd.messageReferences.get(sequenceId);
                if (references != null) {
                    references = references.longValue() - 1;

                    if (references.longValue() > 0) {
                        sd.messageReferences.put(sequenceId, references);
                    } else {
                        sd.messageReferences.remove(sequenceId);
                        unreferenced.add(sequenceId);
                    }
                }
            }

            for(Long sequenceId : unreferenced) {
                // Find all the entries that need to get deleted.
                ArrayList<Entry<Long, MessageKeys>> deletes = new ArrayList<>();
                sd.orderIndex.getDeleteList(tx, deletes, sequenceId);

                // Do the actual deletes.
                for (Entry<Long, MessageKeys> entry : deletes) {
                    sd.locationIndex.remove(tx, entry.getValue().location);
                    sd.messageIdIndex.remove(tx, entry.getValue().messageId);
                    sd.orderIndex.remove(tx, entry.getKey());
                    // keep the destination statistics in step with the removal
                    decrementAndSubSizeToStoreStat(command.getDestination(), entry.getValue().location.getSize());
                }
            }
        }
    }
2932
2933    /**
2934     * @param tx
2935     * @param sd
2936     * @param subscriptionKey
2937     * @param messageSequence
2938     * @throws IOException
2939     */
2940    private void removeAckLocation(KahaRemoveMessageCommand command,
2941            Transaction tx, StoredDestination sd, String subscriptionKey,
2942            Long messageSequence) throws IOException {
2943        // Remove the sub from the previous location set..
2944        if (messageSequence != null) {
2945            SequenceSet range = sd.ackPositions.get(tx, subscriptionKey);
2946            if (range != null && !range.isEmpty()) {
2947                range.remove(messageSequence);
2948                if (!range.isEmpty()) {
2949                    sd.ackPositions.put(tx, subscriptionKey, range);
2950                } else {
2951                    sd.ackPositions.remove(tx, subscriptionKey);
2952                }
2953
2954                MessageKeys key = sd.orderIndex.get(tx, messageSequence);
2955                decrementAndSubSizeToStoreStat(command.getDestination(), subscriptionKey,
2956                        key.location.getSize());
2957
2958                // Check if the message is reference by any other subscription.
2959                Long count = sd.messageReferences.get(messageSequence);
2960                if (count != null) {
2961                    long references = count.longValue() - 1;
2962                    if (references > 0) {
2963                        sd.messageReferences.put(messageSequence, Long.valueOf(references));
2964                        return;
2965                    } else {
2966                        sd.messageReferences.remove(messageSequence);
2967                    }
2968                }
2969
2970                // Find all the entries that need to get deleted.
2971                ArrayList<Entry<Long, MessageKeys>> deletes = new ArrayList<>();
2972                sd.orderIndex.getDeleteList(tx, deletes, messageSequence);
2973
2974                // Do the actual deletes.
2975                for (Entry<Long, MessageKeys> entry : deletes) {
2976                    sd.locationIndex.remove(tx, entry.getValue().location);
2977                    sd.messageIdIndex.remove(tx, entry.getValue().messageId);
2978                    sd.orderIndex.remove(tx, entry.getKey());
2979                    decrementAndSubSizeToStoreStat(command.getDestination(), entry.getValue().location.getSize());
2980                }
2981            }
2982        }
2983    }
2984
2985    public LastAck getLastAck(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
2986        return sd.subscriptionAcks.get(tx, subscriptionKey);
2987    }
2988
2989    protected long getStoredMessageCount(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
2990        if (sd.ackPositions != null) {
2991            SequenceSet messageSequences = sd.ackPositions.get(tx, subscriptionKey);
2992            if (messageSequences != null) {
2993                long result = messageSequences.rangeSize();
2994                // if there's anything in the range the last value is always the nextMessage marker, so remove 1.
2995                return result > 0 ? result - 1 : 0;
2996            }
2997        }
2998
2999        return 0;
3000    }
3001
3002    protected long getStoredMessageSize(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
3003        long locationSize = 0;
3004
3005        if (sd.ackPositions != null) {
3006            //grab the messages attached to this subscription
3007            SequenceSet messageSequences = sd.ackPositions.get(tx, subscriptionKey);
3008
3009            if (messageSequences != null) {
3010                Sequence head = messageSequences.getHead();
3011                if (head != null) {
3012                    //get an iterator over the order index starting at the first unacked message
3013                    //and go over each message to add up the size
3014                    Iterator<Entry<Long, MessageKeys>> iterator = sd.orderIndex.iterator(tx,
3015                            new MessageOrderCursor(head.getFirst()));
3016
3017                    while (iterator.hasNext()) {
3018                        Entry<Long, MessageKeys> entry = iterator.next();
3019                        locationSize += entry.getValue().location.getSize();
3020                    }
3021                }
3022            }
3023        }
3024
3025        return locationSize;
3026    }
3027
3028    protected String key(KahaDestination destination) {
3029        return destination.getType().getNumber() + ":" + destination.getName();
3030    }
3031
3032    // /////////////////////////////////////////////////////////////////
3033    // Transaction related implementation methods.
3034    // /////////////////////////////////////////////////////////////////
3035    @SuppressWarnings("rawtypes")
3036    private final LinkedHashMap<TransactionId, List<Operation>> inflightTransactions = new LinkedHashMap<>();
3037    @SuppressWarnings("rawtypes")
3038    protected final LinkedHashMap<TransactionId, List<Operation>> preparedTransactions = new LinkedHashMap<>();
3039    protected final Set<String> ackedAndPrepared = new HashSet<>();
3040    protected final Set<String> rolledBackAcks = new HashSet<>();
3041
3042    // messages that have prepared (pending) acks cannot be re-dispatched unless the outcome is rollback,
3043    // till then they are skipped by the store.
3044    // 'at most once' XA guarantee
3045    public void trackRecoveredAcks(ArrayList<MessageAck> acks) {
3046        this.indexLock.writeLock().lock();
3047        try {
3048            for (MessageAck ack : acks) {
3049                ackedAndPrepared.add(ack.getLastMessageId().toProducerKey());
3050            }
3051        } finally {
3052            this.indexLock.writeLock().unlock();
3053        }
3054    }
3055
3056    public void forgetRecoveredAcks(ArrayList<MessageAck> acks, boolean rollback) throws IOException {
3057        if (acks != null) {
3058            this.indexLock.writeLock().lock();
3059            try {
3060                for (MessageAck ack : acks) {
3061                    final String id = ack.getLastMessageId().toProducerKey();
3062                    ackedAndPrepared.remove(id);
3063                    if (rollback) {
3064                        rolledBackAcks.add(id);
3065                    }
3066                }
3067            } finally {
3068                this.indexLock.writeLock().unlock();
3069            }
3070        }
3071    }
3072
3073    @SuppressWarnings("rawtypes")
3074    private List<Operation> getInflightTx(KahaTransactionInfo info) {
3075        TransactionId key = TransactionIdConversion.convert(info);
3076        List<Operation> tx;
3077        synchronized (inflightTransactions) {
3078            tx = inflightTransactions.get(key);
3079            if (tx == null) {
3080                tx = Collections.synchronizedList(new ArrayList<Operation>());
3081                inflightTransactions.put(key, tx);
3082            }
3083        }
3084        return tx;
3085    }
3086
3087    @SuppressWarnings("unused")
3088    private TransactionId key(KahaTransactionInfo transactionInfo) {
3089        return TransactionIdConversion.convert(transactionInfo);
3090    }
3091
    /**
     * A journal command recorded against a transaction, together with the
     * journal location it was written at. Subclasses replay the command into
     * the index when the transaction completes.
     *
     * @param <T> the journal command type this operation carries
     */
    abstract class Operation <T extends JournalCommand<T>> {
        final T command;
        final Location location;

        public Operation(T command, Location location) {
            this.command = command;
            this.location = location;
        }

        public Location getLocation() {
            return location;
        }

        public T getCommand() {
            return command;
        }

        /** Apply this operation to the index within the given transaction. */
        abstract public void execute(Transaction tx) throws IOException;
    }
3111
3112    class AddOperation extends Operation<KahaAddMessageCommand> {
3113        final IndexAware runWithIndexLock;
3114        public AddOperation(KahaAddMessageCommand command, Location location, IndexAware runWithIndexLock) {
3115            super(command, location);
3116            this.runWithIndexLock = runWithIndexLock;
3117        }
3118
3119        @Override
3120        public void execute(Transaction tx) throws IOException {
3121            long seq = updateIndex(tx, command, location);
3122            if (runWithIndexLock != null) {
3123                runWithIndexLock.sequenceAssignedWithIndexLocked(seq);
3124            }
3125        }
3126    }
3127
3128    class RemoveOperation extends Operation<KahaRemoveMessageCommand> {
3129
3130        public RemoveOperation(KahaRemoveMessageCommand command, Location location) {
3131            super(command, location);
3132        }
3133
3134        @Override
3135        public void execute(Transaction tx) throws IOException {
3136            updateIndex(tx, command, location);
3137        }
3138    }
3139
3140    // /////////////////////////////////////////////////////////////////
3141    // Initialization related implementation methods.
3142    // /////////////////////////////////////////////////////////////////
3143
3144    private PageFile createPageFile() throws IOException {
3145        if (indexDirectory == null) {
3146            indexDirectory = directory;
3147        }
3148        IOHelper.mkdirs(indexDirectory);
3149        PageFile index = new PageFile(indexDirectory, "db");
3150        index.setEnableWriteThread(isEnableIndexWriteAsync());
3151        index.setWriteBatchSize(getIndexWriteBatchSize());
3152        index.setPageCacheSize(indexCacheSize);
3153        index.setUseLFRUEviction(isUseIndexLFRUEviction());
3154        index.setLFUEvictionFactor(getIndexLFUEvictionFactor());
3155        index.setEnableDiskSyncs(isEnableIndexDiskSyncs());
3156        index.setEnableRecoveryFile(isEnableIndexRecoveryFile());
3157        index.setEnablePageCaching(isEnableIndexPageCaching());
3158        return index;
3159    }
3160
3161    protected Journal createJournal() throws IOException {
3162        Journal manager = new Journal();
3163        manager.setDirectory(directory);
3164        manager.setMaxFileLength(getJournalMaxFileLength());
3165        manager.setCheckForCorruptionOnStartup(checkForCorruptJournalFiles);
3166        manager.setChecksum(checksumJournalFiles || checkForCorruptJournalFiles);
3167        manager.setWriteBatchSize(getJournalMaxWriteBatchSize());
3168        manager.setArchiveDataLogs(isArchiveDataLogs());
3169        manager.setSizeAccumulator(journalSize);
3170        manager.setEnableAsyncDiskSync(isEnableJournalDiskSyncs());
3171        manager.setPreallocationScope(Journal.PreallocationScope.valueOf(preallocationScope.trim().toUpperCase()));
3172        manager.setPreallocationStrategy(
3173                Journal.PreallocationStrategy.valueOf(preallocationStrategy.trim().toUpperCase()));
3174        manager.setJournalDiskSyncStrategy(journalDiskSyncStrategy);
3175        if (getDirectoryArchive() != null) {
3176            IOHelper.mkdirs(getDirectoryArchive());
3177            manager.setDirectoryArchive(getDirectoryArchive());
3178        }
3179        return manager;
3180    }
3181
3182    private Metadata createMetadata() {
3183        Metadata md = new Metadata();
3184        md.producerSequenceIdTracker.setAuditDepth(getFailoverProducersAuditDepth());
3185        md.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(getMaxFailoverProducersToTrack());
3186        return md;
3187    }
3188
    /** Hook for subclasses to customize the store metadata. */
    protected abstract void configureMetadata();
3190
    /** @return the write batch size handed to the Journal on creation. */
    public int getJournalMaxWriteBatchSize() {
        return journalMaxWriteBatchSize;
    }

    public void setJournalMaxWriteBatchSize(int journalMaxWriteBatchSize) {
        this.journalMaxWriteBatchSize = journalMaxWriteBatchSize;
    }

    /** @return the directory holding the journal data files. */
    public File getDirectory() {
        return directory;
    }

    public void setDirectory(File directory) {
        this.directory = directory;
    }

    /** @return the deleteAllMessages flag. */
    public boolean isDeleteAllMessages() {
        return deleteAllMessages;
    }

    public void setDeleteAllMessages(boolean deleteAllMessages) {
        this.deleteAllMessages = deleteAllMessages;
    }

    // NOTE: the backing field is (confusingly) also named setIndexWriteBatchSize
    public void setIndexWriteBatchSize(int setIndexWriteBatchSize) {
        this.setIndexWriteBatchSize = setIndexWriteBatchSize;
    }

    /** @return the write batch size applied to the index PageFile. */
    public int getIndexWriteBatchSize() {
        return setIndexWriteBatchSize;
    }

    public void setEnableIndexWriteAsync(boolean enableIndexWriteAsync) {
        this.enableIndexWriteAsync = enableIndexWriteAsync;
    }

    /** @return whether the index PageFile enables its write thread. */
    boolean isEnableIndexWriteAsync() {
        return enableIndexWriteAsync;
    }
3230
3231    /**
3232     * @deprecated use {@link #getJournalDiskSyncStrategyEnum} or {@link #getJournalDiskSyncStrategy} instead
3233     * @return
3234     */
3235    @Deprecated
3236    public boolean isEnableJournalDiskSyncs() {
3237        return journalDiskSyncStrategy == JournalDiskSyncStrategy.ALWAYS;
3238    }
3239
3240    /**
3241     * @deprecated use {@link #setEnableJournalDiskSyncs} instead
3242     * @param syncWrites
3243     */
3244    @Deprecated
3245    public void setEnableJournalDiskSyncs(boolean syncWrites) {
3246        if (syncWrites) {
3247            journalDiskSyncStrategy = JournalDiskSyncStrategy.ALWAYS;
3248        } else {
3249            journalDiskSyncStrategy = JournalDiskSyncStrategy.NEVER;
3250        }
3251    }
3252
3253    public JournalDiskSyncStrategy getJournalDiskSyncStrategyEnum() {
3254        return journalDiskSyncStrategy;
3255    }
3256
3257    public String getJournalDiskSyncStrategy() {
3258        return journalDiskSyncStrategy.name();
3259    }
3260
3261    public void setJournalDiskSyncStrategy(String journalDiskSyncStrategy) {
3262        this.journalDiskSyncStrategy = JournalDiskSyncStrategy.valueOf(journalDiskSyncStrategy.trim().toUpperCase());
3263    }
3264
    /** @return the configured journal disk sync interval. */
    public long getJournalDiskSyncInterval() {
        return journalDiskSyncInterval;
    }

    public void setJournalDiskSyncInterval(long journalDiskSyncInterval) {
        this.journalDiskSyncInterval = journalDiskSyncInterval;
    }

    /** @return the configured checkpoint interval. */
    public long getCheckpointInterval() {
        return checkpointInterval;
    }

    public void setCheckpointInterval(long checkpointInterval) {
        this.checkpointInterval = checkpointInterval;
    }

    /** @return the configured cleanup interval. */
    public long getCleanupInterval() {
        return cleanupInterval;
    }

    public void setCleanupInterval(long cleanupInterval) {
        this.cleanupInterval = cleanupInterval;
    }

    public void setJournalMaxFileLength(int journalMaxFileLength) {
        this.journalMaxFileLength = journalMaxFileLength;
    }

    /** @return the maximum length of a journal data file. */
    public int getJournalMaxFileLength() {
        return journalMaxFileLength;
    }

    // the failover producer settings delegate to the metadata's sequence id tracker
    public void setMaxFailoverProducersToTrack(int maxFailoverProducersToTrack) {
        this.metadata.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(maxFailoverProducersToTrack);
    }

    public int getMaxFailoverProducersToTrack() {
        return this.metadata.producerSequenceIdTracker.getMaximumNumberOfProducersToTrack();
    }

    public void setFailoverProducersAuditDepth(int failoverProducersAuditDepth) {
        this.metadata.producerSequenceIdTracker.setAuditDepth(failoverProducersAuditDepth);
    }

    public int getFailoverProducersAuditDepth() {
        return this.metadata.producerSequenceIdTracker.getAuditDepth();
    }
3312
    /**
     * Lazily create and return the index PageFile.
     * NOTE(review): the lazy init is unsynchronized - presumably callers
     * serialize access externally; confirm before concurrent use.
     */
    public PageFile getPageFile() throws IOException {
        if (pageFile == null) {
            pageFile = createPageFile();
        }
        return pageFile;
    }

    /** Lazily create and return the Journal (same unsynchronized lazy init as getPageFile). */
    public Journal getJournal() throws IOException {
        if (journal == null) {
            journal = createJournal();
        }
        return journal;
    }

    /** @return the store metadata. */
    protected Metadata getMetadata() {
        return metadata;
    }

    /** @return the failIfDatabaseIsLocked flag. */
    public boolean isFailIfDatabaseIsLocked() {
        return failIfDatabaseIsLocked;
    }

    public void setFailIfDatabaseIsLocked(boolean failIfDatabaseIsLocked) {
        this.failIfDatabaseIsLocked = failIfDatabaseIsLocked;
    }
3338
    /** @return the ignoreMissingJournalfiles flag. */
    public boolean isIgnoreMissingJournalfiles() {
        return ignoreMissingJournalfiles;
    }

    public void setIgnoreMissingJournalfiles(boolean ignoreMissingJournalfiles) {
        this.ignoreMissingJournalfiles = ignoreMissingJournalfiles;
    }

    /** @return the page cache size applied to the index PageFile. */
    public int getIndexCacheSize() {
        return indexCacheSize;
    }

    public void setIndexCacheSize(int indexCacheSize) {
        this.indexCacheSize = indexCacheSize;
    }

    /** @return whether journal files are checked for corruption on startup. */
    public boolean isCheckForCorruptJournalFiles() {
        return checkForCorruptJournalFiles;
    }

    public void setCheckForCorruptJournalFiles(boolean checkForCorruptJournalFiles) {
        this.checkForCorruptJournalFiles = checkForCorruptJournalFiles;
    }

    /** @return the strategy for purging recovered XA transactions, as an enum. */
    public PurgeRecoveredXATransactionStrategy getPurgeRecoveredXATransactionStrategyEnum() {
        return purgeRecoveredXATransactionStrategy;
    }

    /** @return the strategy for purging recovered XA transactions, by name. */
    public String getPurgeRecoveredXATransactionStrategy() {
        return purgeRecoveredXATransactionStrategy.name();
    }

    /** Set the purge strategy by (case-insensitive) enum name. */
    public void setPurgeRecoveredXATransactionStrategy(String purgeRecoveredXATransactionStrategy) {
        this.purgeRecoveredXATransactionStrategy = PurgeRecoveredXATransactionStrategy.valueOf(
                purgeRecoveredXATransactionStrategy.trim().toUpperCase());
    }

    /** @return whether journal file checksums are written. */
    public boolean isChecksumJournalFiles() {
        return checksumJournalFiles;
    }

    public void setChecksumJournalFiles(boolean checksumJournalFiles) {
        this.checksumJournalFiles = checksumJournalFiles;
    }
3383
    @Override
    public void setBrokerService(BrokerService brokerService) {
        this.brokerService = brokerService;
    }

    /**
     * @return whether completed journal data logs are archived rather than deleted
     */
    public boolean isArchiveDataLogs() {
        return this.archiveDataLogs;
    }

    /**
     * @param archiveDataLogs the archiveDataLogs to set
     */
    public void setArchiveDataLogs(boolean archiveDataLogs) {
        this.archiveDataLogs = archiveDataLogs;
    }

    /**
     * @return the directory the Journal archives data logs into (created on journal creation)
     */
    public File getDirectoryArchive() {
        return this.directoryArchive;
    }

    /**
     * @param directoryArchive the directoryArchive to set
     */
    public void setDirectoryArchive(File directoryArchive) {
        this.directoryArchive = directoryArchive;
    }

    /** @return whether a corrupted index is archived rather than discarded. */
    public boolean isArchiveCorruptedIndex() {
        return archiveCorruptedIndex;
    }

    public void setArchiveCorruptedIndex(boolean archiveCorruptedIndex) {
        this.archiveCorruptedIndex = archiveCorruptedIndex;
    }

    /** @return the LFU eviction factor applied to the index PageFile. */
    public float getIndexLFUEvictionFactor() {
        return indexLFUEvictionFactor;
    }

    public void setIndexLFUEvictionFactor(float indexLFUEvictionFactor) {
        this.indexLFUEvictionFactor = indexLFUEvictionFactor;
    }

    /** @return whether the index PageFile uses LFRU eviction. */
    public boolean isUseIndexLFRUEviction() {
        return useIndexLFRUEviction;
    }

    public void setUseIndexLFRUEviction(boolean useIndexLFRUEviction) {
        this.useIndexLFRUEviction = useIndexLFRUEviction;
    }

    // the following three options are handed to the index PageFile on creation
    public void setEnableIndexDiskSyncs(boolean enableIndexDiskSyncs) {
        this.enableIndexDiskSyncs = enableIndexDiskSyncs;
    }

    public void setEnableIndexRecoveryFile(boolean enableIndexRecoveryFile) {
        this.enableIndexRecoveryFile = enableIndexRecoveryFile;
    }

    public void setEnableIndexPageCaching(boolean enableIndexPageCaching) {
        this.enableIndexPageCaching = enableIndexPageCaching;
    }

    public boolean isEnableIndexDiskSyncs() {
        return enableIndexDiskSyncs;
    }

    public boolean isEnableIndexRecoveryFile() {
        return enableIndexRecoveryFile;
    }

    public boolean isEnableIndexPageCaching() {
        return enableIndexPageCaching;
    }
3464
3465    // /////////////////////////////////////////////////////////////////
3466    // Internal conversion methods.
3467    // /////////////////////////////////////////////////////////////////
3468
3469    class MessageOrderCursor{
3470        long defaultCursorPosition;
3471        long lowPriorityCursorPosition;
3472        long highPriorityCursorPosition;
3473        MessageOrderCursor(){
3474        }
3475
3476        MessageOrderCursor(long position){
3477            this.defaultCursorPosition=position;
3478            this.lowPriorityCursorPosition=position;
3479            this.highPriorityCursorPosition=position;
3480        }
3481
3482        MessageOrderCursor(MessageOrderCursor other){
3483            this.defaultCursorPosition=other.defaultCursorPosition;
3484            this.lowPriorityCursorPosition=other.lowPriorityCursorPosition;
3485            this.highPriorityCursorPosition=other.highPriorityCursorPosition;
3486        }
3487
3488        MessageOrderCursor copy() {
3489            return new MessageOrderCursor(this);
3490        }
3491
3492        void reset() {
3493            this.defaultCursorPosition=0;
3494            this.highPriorityCursorPosition=0;
3495            this.lowPriorityCursorPosition=0;
3496        }
3497
3498        void increment() {
3499            if (defaultCursorPosition!=0) {
3500                defaultCursorPosition++;
3501            }
3502            if (highPriorityCursorPosition!=0) {
3503                highPriorityCursorPosition++;
3504            }
3505            if (lowPriorityCursorPosition!=0) {
3506                lowPriorityCursorPosition++;
3507            }
3508        }
3509
3510        @Override
3511        public String toString() {
3512           return "MessageOrderCursor:[def:" + defaultCursorPosition
3513                   + ", low:" + lowPriorityCursorPosition
3514                   + ", high:" +  highPriorityCursorPosition + "]";
3515        }
3516
3517        public void sync(MessageOrderCursor other) {
3518            this.defaultCursorPosition=other.defaultCursorPosition;
3519            this.lowPriorityCursorPosition=other.lowPriorityCursorPosition;
3520            this.highPriorityCursorPosition=other.highPriorityCursorPosition;
3521        }
3522    }
3523
3524    class MessageOrderIndex {
3525        static final byte HI = 9;
3526        static final byte LO = 0;
3527        static final byte DEF = 4;
3528
3529        long nextMessageId;
3530        BTreeIndex<Long, MessageKeys> defaultPriorityIndex;
3531        BTreeIndex<Long, MessageKeys> lowPriorityIndex;
3532        BTreeIndex<Long, MessageKeys> highPriorityIndex;
3533        final MessageOrderCursor cursor = new MessageOrderCursor();
3534        Long lastDefaultKey;
3535        Long lastHighKey;
3536        Long lastLowKey;
3537        byte lastGetPriority;
3538        final List<Long> pendingAdditions = new LinkedList<>();
3539        final MessageKeysMarshaller messageKeysMarshaller = new MessageKeysMarshaller();
3540
3541        MessageKeys remove(Transaction tx, Long key) throws IOException {
3542            MessageKeys result = defaultPriorityIndex.remove(tx, key);
3543            if (result == null && highPriorityIndex!=null) {
3544                result = highPriorityIndex.remove(tx, key);
3545                if (result ==null && lowPriorityIndex!=null) {
3546                    result = lowPriorityIndex.remove(tx, key);
3547                }
3548            }
3549            return result;
3550        }
3551
3552        void load(Transaction tx) throws IOException {
3553            defaultPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
3554            defaultPriorityIndex.setValueMarshaller(messageKeysMarshaller);
3555            defaultPriorityIndex.load(tx);
3556            lowPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
3557            lowPriorityIndex.setValueMarshaller(messageKeysMarshaller);
3558            lowPriorityIndex.load(tx);
3559            highPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
3560            highPriorityIndex.setValueMarshaller(messageKeysMarshaller);
3561            highPriorityIndex.load(tx);
3562        }
3563
3564        void allocate(Transaction tx) throws IOException {
3565            defaultPriorityIndex = new BTreeIndex<>(pageFile, tx.allocate());
3566            if (metadata.version >= 2) {
3567                lowPriorityIndex = new BTreeIndex<>(pageFile, tx.allocate());
3568                highPriorityIndex = new BTreeIndex<>(pageFile, tx.allocate());
3569            }
3570        }
3571
3572        void configureLast(Transaction tx) throws IOException {
3573            // Figure out the next key using the last entry in the destination.
3574            TreeSet<Long> orderedSet = new TreeSet<>();
3575
3576            addLast(orderedSet, highPriorityIndex, tx);
3577            addLast(orderedSet, defaultPriorityIndex, tx);
3578            addLast(orderedSet, lowPriorityIndex, tx);
3579
3580            if (!orderedSet.isEmpty()) {
3581                nextMessageId = orderedSet.last() + 1;
3582            }
3583        }
3584
3585        private void addLast(TreeSet<Long> orderedSet, BTreeIndex<Long, MessageKeys> index, Transaction tx) throws IOException {
3586            if (index != null) {
3587                Entry<Long, MessageKeys> lastEntry = index.getLast(tx);
3588                if (lastEntry != null) {
3589                    orderedSet.add(lastEntry.getKey());
3590                }
3591            }
3592        }
3593
3594        void clear(Transaction tx) throws IOException {
3595            this.remove(tx);
3596            this.resetCursorPosition();
3597            this.allocate(tx);
3598            this.load(tx);
3599            this.configureLast(tx);
3600        }
3601
3602        void remove(Transaction tx) throws IOException {
3603            defaultPriorityIndex.clear(tx);
3604            defaultPriorityIndex.unload(tx);
3605            tx.free(defaultPriorityIndex.getPageId());
3606            if (lowPriorityIndex != null) {
3607                lowPriorityIndex.clear(tx);
3608                lowPriorityIndex.unload(tx);
3609
3610                tx.free(lowPriorityIndex.getPageId());
3611            }
3612            if (highPriorityIndex != null) {
3613                highPriorityIndex.clear(tx);
3614                highPriorityIndex.unload(tx);
3615                tx.free(highPriorityIndex.getPageId());
3616            }
3617        }
3618
3619        void resetCursorPosition() {
3620            this.cursor.reset();
3621            lastDefaultKey = null;
3622            lastHighKey = null;
3623            lastLowKey = null;
3624        }
3625
3626        void setBatch(Transaction tx, Long sequence) throws IOException {
3627            if (sequence != null) {
3628                Long nextPosition = new Long(sequence.longValue() + 1);
3629                lastDefaultKey = sequence;
3630                cursor.defaultCursorPosition = nextPosition.longValue();
3631                lastHighKey = sequence;
3632                cursor.highPriorityCursorPosition = nextPosition.longValue();
3633                lastLowKey = sequence;
3634                cursor.lowPriorityCursorPosition = nextPosition.longValue();
3635            }
3636        }
3637
3638        void setBatch(Transaction tx, LastAck last) throws IOException {
3639            setBatch(tx, last.lastAckedSequence);
3640            if (cursor.defaultCursorPosition == 0
3641                    && cursor.highPriorityCursorPosition == 0
3642                    && cursor.lowPriorityCursorPosition == 0) {
3643                long next = last.lastAckedSequence + 1;
3644                switch (last.priority) {
3645                    case DEF:
3646                        cursor.defaultCursorPosition = next;
3647                        cursor.highPriorityCursorPosition = next;
3648                        break;
3649                    case HI:
3650                        cursor.highPriorityCursorPosition = next;
3651                        break;
3652                    case LO:
3653                        cursor.lowPriorityCursorPosition = next;
3654                        cursor.defaultCursorPosition = next;
3655                        cursor.highPriorityCursorPosition = next;
3656                        break;
3657                }
3658            }
3659        }
3660
3661        void stoppedIterating() {
3662            if (lastDefaultKey!=null) {
3663                cursor.defaultCursorPosition=lastDefaultKey.longValue()+1;
3664            }
3665            if (lastHighKey!=null) {
3666                cursor.highPriorityCursorPosition=lastHighKey.longValue()+1;
3667            }
3668            if (lastLowKey!=null) {
3669                cursor.lowPriorityCursorPosition=lastLowKey.longValue()+1;
3670            }
3671            lastDefaultKey = null;
3672            lastHighKey = null;
3673            lastLowKey = null;
3674        }
3675
3676        void getDeleteList(Transaction tx, ArrayList<Entry<Long, MessageKeys>> deletes, Long sequenceId)
3677                throws IOException {
3678            if (defaultPriorityIndex.containsKey(tx, sequenceId)) {
3679                getDeleteList(tx, deletes, defaultPriorityIndex, sequenceId);
3680            } else if (highPriorityIndex != null && highPriorityIndex.containsKey(tx, sequenceId)) {
3681                getDeleteList(tx, deletes, highPriorityIndex, sequenceId);
3682            } else if (lowPriorityIndex != null && lowPriorityIndex.containsKey(tx, sequenceId)) {
3683                getDeleteList(tx, deletes, lowPriorityIndex, sequenceId);
3684            }
3685        }
3686
3687        void getDeleteList(Transaction tx, ArrayList<Entry<Long, MessageKeys>> deletes,
3688                BTreeIndex<Long, MessageKeys> index, Long sequenceId) throws IOException {
3689
3690            Iterator<Entry<Long, MessageKeys>> iterator = index.iterator(tx, sequenceId, null);
3691            deletes.add(iterator.next());
3692        }
3693
3694        long getNextMessageId() {
3695            return nextMessageId++;
3696        }
3697
3698        void revertNextMessageId() {
3699            nextMessageId--;
3700        }
3701
3702        MessageKeys get(Transaction tx, Long key) throws IOException {
3703            MessageKeys result = defaultPriorityIndex.get(tx, key);
3704            if (result == null) {
3705                result = highPriorityIndex.get(tx, key);
3706                if (result == null) {
3707                    result = lowPriorityIndex.get(tx, key);
3708                    lastGetPriority = LO;
3709                } else {
3710                    lastGetPriority = HI;
3711                }
3712            } else {
3713                lastGetPriority = DEF;
3714            }
3715            return result;
3716        }
3717
3718        MessageKeys put(Transaction tx, int priority, Long key, MessageKeys value) throws IOException {
3719            if (priority == javax.jms.Message.DEFAULT_PRIORITY) {
3720                return defaultPriorityIndex.put(tx, key, value);
3721            } else if (priority > javax.jms.Message.DEFAULT_PRIORITY) {
3722                return highPriorityIndex.put(tx, key, value);
3723            } else {
3724                return lowPriorityIndex.put(tx, key, value);
3725            }
3726        }
3727
3728        Iterator<Entry<Long, MessageKeys>> iterator(Transaction tx) throws IOException{
3729            return new MessageOrderIterator(tx,cursor,this);
3730        }
3731
3732        Iterator<Entry<Long, MessageKeys>> iterator(Transaction tx, MessageOrderCursor m) throws IOException{
3733            return new MessageOrderIterator(tx,m,this);
3734        }
3735
3736        public byte lastGetPriority() {
3737            return lastGetPriority;
3738        }
3739
3740        public boolean alreadyDispatched(Long sequence) {
3741            return (cursor.highPriorityCursorPosition > 0 && cursor.highPriorityCursorPosition >= sequence) ||
3742                    (cursor.defaultCursorPosition > 0 && cursor.defaultCursorPosition >= sequence) ||
3743                    (cursor.lowPriorityCursorPosition > 0 && cursor.lowPriorityCursorPosition >= sequence);
3744        }
3745
3746        public void trackPendingAdd(Long seq) {
3747            synchronized (pendingAdditions) {
3748                pendingAdditions.add(seq);
3749            }
3750        }
3751
3752        public void trackPendingAddComplete(Long seq) {
3753            synchronized (pendingAdditions) {
3754                pendingAdditions.remove(seq);
3755            }
3756        }
3757
3758        public Long minPendingAdd() {
3759            synchronized (pendingAdditions) {
3760                if (!pendingAdditions.isEmpty()) {
3761                    return pendingAdditions.get(0);
3762                } else {
3763                    return null;
3764                }
3765            }
3766        }
3767
3768        class MessageOrderIterator implements Iterator<Entry<Long, MessageKeys>>{
3769            Iterator<Entry<Long, MessageKeys>>currentIterator;
3770            final Iterator<Entry<Long, MessageKeys>>highIterator;
3771            final Iterator<Entry<Long, MessageKeys>>defaultIterator;
3772            final Iterator<Entry<Long, MessageKeys>>lowIterator;
3773
3774            MessageOrderIterator(Transaction tx, MessageOrderCursor m, MessageOrderIndex messageOrderIndex) throws IOException {
3775                Long pendingAddLimiter = messageOrderIndex.minPendingAdd();
3776                this.defaultIterator = defaultPriorityIndex.iterator(tx, m.defaultCursorPosition, pendingAddLimiter);
3777                if (highPriorityIndex != null) {
3778                    this.highIterator = highPriorityIndex.iterator(tx, m.highPriorityCursorPosition, pendingAddLimiter);
3779                } else {
3780                    this.highIterator = null;
3781                }
3782                if (lowPriorityIndex != null) {
3783                    this.lowIterator = lowPriorityIndex.iterator(tx, m.lowPriorityCursorPosition, pendingAddLimiter);
3784                } else {
3785                    this.lowIterator = null;
3786                }
3787            }
3788
3789            @Override
3790            public boolean hasNext() {
3791                if (currentIterator == null) {
3792                    if (highIterator != null) {
3793                        if (highIterator.hasNext()) {
3794                            currentIterator = highIterator;
3795                            return currentIterator.hasNext();
3796                        }
3797                        if (defaultIterator.hasNext()) {
3798                            currentIterator = defaultIterator;
3799                            return currentIterator.hasNext();
3800                        }
3801                        if (lowIterator.hasNext()) {
3802                            currentIterator = lowIterator;
3803                            return currentIterator.hasNext();
3804                        }
3805                        return false;
3806                    } else {
3807                        currentIterator = defaultIterator;
3808                        return currentIterator.hasNext();
3809                    }
3810                }
3811                if (highIterator != null) {
3812                    if (currentIterator.hasNext()) {
3813                        return true;
3814                    }
3815                    if (currentIterator == highIterator) {
3816                        if (defaultIterator.hasNext()) {
3817                            currentIterator = defaultIterator;
3818                            return currentIterator.hasNext();
3819                        }
3820                        if (lowIterator.hasNext()) {
3821                            currentIterator = lowIterator;
3822                            return currentIterator.hasNext();
3823                        }
3824                        return false;
3825                    }
3826
3827                    if (currentIterator == defaultIterator) {
3828                        if (lowIterator.hasNext()) {
3829                            currentIterator = lowIterator;
3830                            return currentIterator.hasNext();
3831                        }
3832                        return false;
3833                    }
3834                }
3835                return currentIterator.hasNext();
3836            }
3837
3838            @Override
3839            public Entry<Long, MessageKeys> next() {
3840                Entry<Long, MessageKeys> result = currentIterator.next();
3841                if (result != null) {
3842                    Long key = result.getKey();
3843                    if (highIterator != null) {
3844                        if (currentIterator == defaultIterator) {
3845                            lastDefaultKey = key;
3846                        } else if (currentIterator == highIterator) {
3847                            lastHighKey = key;
3848                        } else {
3849                            lastLowKey = key;
3850                        }
3851                    } else {
3852                        lastDefaultKey = key;
3853                    }
3854                }
3855                return result;
3856            }
3857
3858            @Override
3859            public void remove() {
3860                throw new UnsupportedOperationException();
3861            }
3862        }
3863    }
3864
3865    private static class HashSetStringMarshaller extends VariableMarshaller<HashSet<String>> {
3866        final static HashSetStringMarshaller INSTANCE = new HashSetStringMarshaller();
3867
3868        @Override
3869        public void writePayload(HashSet<String> object, DataOutput dataOut) throws IOException {
3870            ByteArrayOutputStream baos = new ByteArrayOutputStream();
3871            ObjectOutputStream oout = new ObjectOutputStream(baos);
3872            oout.writeObject(object);
3873            oout.flush();
3874            oout.close();
3875            byte[] data = baos.toByteArray();
3876            dataOut.writeInt(data.length);
3877            dataOut.write(data);
3878        }
3879
3880        @Override
3881        @SuppressWarnings("unchecked")
3882        public HashSet<String> readPayload(DataInput dataIn) throws IOException {
3883            int dataLen = dataIn.readInt();
3884            byte[] data = new byte[dataLen];
3885            dataIn.readFully(data);
3886            ByteArrayInputStream bais = new ByteArrayInputStream(data);
3887            ObjectInputStream oin = new ObjectInputStream(bais);
3888            try {
3889                return (HashSet<String>) oin.readObject();
3890            } catch (ClassNotFoundException cfe) {
3891                IOException ioe = new IOException("Failed to read HashSet<String>: " + cfe);
3892                ioe.initCause(cfe);
3893                throw ioe;
3894            }
3895        }
3896    }
3897
3898    public File getIndexDirectory() {
3899        return indexDirectory;
3900    }
3901
3902    public void setIndexDirectory(File indexDirectory) {
3903        this.indexDirectory = indexDirectory;
3904    }
3905
3906    interface IndexAware {
3907        public void sequenceAssignedWithIndexLocked(long index);
3908    }
3909
3910    public String getPreallocationScope() {
3911        return preallocationScope;
3912    }
3913
3914    public void setPreallocationScope(String preallocationScope) {
3915        this.preallocationScope = preallocationScope;
3916    }
3917
3918    public String getPreallocationStrategy() {
3919        return preallocationStrategy;
3920    }
3921
3922    public void setPreallocationStrategy(String preallocationStrategy) {
3923        this.preallocationStrategy = preallocationStrategy;
3924    }
3925
3926    public int getCompactAcksAfterNoGC() {
3927        return compactAcksAfterNoGC;
3928    }
3929
3930    /**
3931     * Sets the number of GC cycles where no journal logs were removed before an attempt to
3932     * move forward all the acks in the last log that contains them and is otherwise unreferenced.
3933     * <p>
3934     * A value of -1 will disable this feature.
3935     *
3936     * @param compactAcksAfterNoGC
3937     *      Number of empty GC cycles before we rewrite old ACKS.
3938     */
3939    public void setCompactAcksAfterNoGC(int compactAcksAfterNoGC) {
3940        this.compactAcksAfterNoGC = compactAcksAfterNoGC;
3941    }
3942
3943    /**
3944     * Returns whether Ack compaction will ignore that the store is still growing
3945     * and run more often.
3946     *
3947     * @return the compactAcksIgnoresStoreGrowth current value.
3948     */
3949    public boolean isCompactAcksIgnoresStoreGrowth() {
3950        return compactAcksIgnoresStoreGrowth;
3951    }
3952
3953    /**
3954     * Configure if Ack compaction will occur regardless of continued growth of the
3955     * journal logs meaning that the store has not run out of space yet.  Because the
3956     * compaction operation can be costly this value is defaulted to off and the Ack
3957     * compaction is only done when it seems that the store cannot grow and larger.
3958     *
3959     * @param compactAcksIgnoresStoreGrowth the compactAcksIgnoresStoreGrowth to set
3960     */
3961    public void setCompactAcksIgnoresStoreGrowth(boolean compactAcksIgnoresStoreGrowth) {
3962        this.compactAcksIgnoresStoreGrowth = compactAcksIgnoresStoreGrowth;
3963    }
3964
3965    /**
3966     * Returns whether Ack compaction is enabled
3967     *
3968     * @return enableAckCompaction
3969     */
3970    public boolean isEnableAckCompaction() {
3971        return enableAckCompaction;
3972    }
3973
3974    /**
3975     * Configure if the Ack compaction task should be enabled to run
3976     *
3977     * @param enableAckCompaction
3978     */
3979    public void setEnableAckCompaction(boolean enableAckCompaction) {
3980        this.enableAckCompaction = enableAckCompaction;
3981    }
3982
3983    /**
3984     * @return
3985     */
3986    public boolean isEnableSubscriptionStatistics() {
3987        return enableSubscriptionStatistics;
3988    }
3989
3990    /**
3991     * Enable caching statistics for each subscription to allow non-blocking
3992     * retrieval of metrics.  This could incur some overhead to compute if there are a lot
3993     * of subscriptions.
3994     *
3995     * @param enableSubscriptionStatistics
3996     */
3997    public void setEnableSubscriptionStatistics(boolean enableSubscriptionStatistics) {
3998        this.enableSubscriptionStatistics = enableSubscriptionStatistics;
3999    }
4000}