001/** 002 * Licensed to the Apache Software Foundation (ASF) under one or more 003 * contributor license agreements. See the NOTICE file distributed with 004 * this work for additional information regarding copyright ownership. 005 * The ASF licenses this file to You under the Apache License, Version 2.0 006 * (the "License"); you may not use this file except in compliance with 007 * the License. You may obtain a copy of the License at 008 * 009 * http://www.apache.org/licenses/LICENSE-2.0 010 * 011 * Unless required by applicable law or agreed to in writing, software 012 * distributed under the License is distributed on an "AS IS" BASIS, 013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 014 * See the License for the specific language governing permissions and 015 * limitations under the License. 016 */ 017package org.apache.activemq.store.kahadb; 018 019import java.io.ByteArrayInputStream; 020import java.io.ByteArrayOutputStream; 021import java.io.DataInput; 022import java.io.DataOutput; 023import java.io.EOFException; 024import java.io.File; 025import java.io.IOException; 026import java.io.InputStream; 027import java.io.InterruptedIOException; 028import java.io.ObjectInputStream; 029import java.io.ObjectOutputStream; 030import java.io.OutputStream; 031import java.util.ArrayList; 032import java.util.Arrays; 033import java.util.Collection; 034import java.util.Collections; 035import java.util.Date; 036import java.util.HashMap; 037import java.util.HashSet; 038import java.util.Iterator; 039import java.util.LinkedHashMap; 040import java.util.LinkedHashSet; 041import java.util.LinkedList; 042import java.util.List; 043import java.util.Map; 044import java.util.Map.Entry; 045import java.util.Set; 046import java.util.SortedSet; 047import java.util.TreeMap; 048import java.util.TreeSet; 049import java.util.concurrent.Callable; 050import java.util.concurrent.atomic.AtomicBoolean; 051import java.util.concurrent.atomic.AtomicLong; 052import 
java.util.concurrent.locks.ReentrantReadWriteLock; 053 054import org.apache.activemq.ActiveMQMessageAuditNoSync; 055import org.apache.activemq.broker.BrokerService; 056import org.apache.activemq.broker.BrokerServiceAware; 057import org.apache.activemq.command.MessageAck; 058import org.apache.activemq.command.TransactionId; 059import org.apache.activemq.openwire.OpenWireFormat; 060import org.apache.activemq.protobuf.Buffer; 061import org.apache.activemq.store.kahadb.data.KahaAckMessageFileMapCommand; 062import org.apache.activemq.store.kahadb.data.KahaAddMessageCommand; 063import org.apache.activemq.store.kahadb.data.KahaCommitCommand; 064import org.apache.activemq.store.kahadb.data.KahaDestination; 065import org.apache.activemq.store.kahadb.data.KahaEntryType; 066import org.apache.activemq.store.kahadb.data.KahaPrepareCommand; 067import org.apache.activemq.store.kahadb.data.KahaProducerAuditCommand; 068import org.apache.activemq.store.kahadb.data.KahaRemoveDestinationCommand; 069import org.apache.activemq.store.kahadb.data.KahaRemoveMessageCommand; 070import org.apache.activemq.store.kahadb.data.KahaRollbackCommand; 071import org.apache.activemq.store.kahadb.data.KahaSubscriptionCommand; 072import org.apache.activemq.store.kahadb.data.KahaTraceCommand; 073import org.apache.activemq.store.kahadb.data.KahaTransactionInfo; 074import org.apache.activemq.store.kahadb.data.KahaUpdateMessageCommand; 075import org.apache.activemq.store.kahadb.disk.index.BTreeIndex; 076import org.apache.activemq.store.kahadb.disk.index.BTreeVisitor; 077import org.apache.activemq.store.kahadb.disk.index.ListIndex; 078import org.apache.activemq.store.kahadb.disk.journal.DataFile; 079import org.apache.activemq.store.kahadb.disk.journal.Journal; 080import org.apache.activemq.store.kahadb.disk.journal.Location; 081import org.apache.activemq.store.kahadb.disk.page.Page; 082import org.apache.activemq.store.kahadb.disk.page.PageFile; 083import org.apache.activemq.store.kahadb.disk.page.Transaction; 
import org.apache.activemq.store.kahadb.disk.util.LocationMarshaller;
import org.apache.activemq.store.kahadb.disk.util.LongMarshaller;
import org.apache.activemq.store.kahadb.disk.util.Marshaller;
import org.apache.activemq.store.kahadb.disk.util.Sequence;
import org.apache.activemq.store.kahadb.disk.util.SequenceSet;
import org.apache.activemq.store.kahadb.disk.util.StringMarshaller;
import org.apache.activemq.store.kahadb.disk.util.VariableMarshaller;
import org.apache.activemq.util.ByteSequence;
import org.apache.activemq.util.DataByteArrayInputStream;
import org.apache.activemq.util.DataByteArrayOutputStream;
import org.apache.activemq.util.IOHelper;
import org.apache.activemq.util.ServiceStopper;
import org.apache.activemq.util.ServiceSupport;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Core KahaDB store engine: pairs an append-only journal with a BTree-based
 * page-file index, and keeps the two consistent across restarts via
 * checkpointing and journal replay (see {@code recover()} further down).
 */
public abstract class MessageDatabase extends ServiceSupport implements BrokerServiceAware {

    protected BrokerService brokerService;

    // When > 0 (set via system property), journal/index operations slower than
    // this many milliseconds are logged at INFO level.
    public static final String PROPERTY_LOG_SLOW_ACCESS_TIME = "org.apache.activemq.store.kahadb.LOG_SLOW_ACCESS_TIME";
    public static final int LOG_SLOW_ACCESS_TIME = Integer.getInteger(PROPERTY_LOG_SLOW_ACCESS_TIME, 0);
    public static final File DEFAULT_DIRECTORY = new File("KahaDB");
    protected static final Buffer UNMATCHED;
    static {
        UNMATCHED = new Buffer(new byte[]{});
    }
    private static final Logger LOG = LoggerFactory.getLogger(MessageDatabase.class);

    // Persisted in Metadata.state; an OPEN_STATE found at startup indicates an
    // unclean shutdown.
    static final int CLOSED_STATE = 1;
    static final int OPEN_STATE = 2;
    static final long NOT_ACKED = -1;

    // On-disk metadata format version stamped by Metadata.write().
    static final int VERSION = 5;

    /**
     * Root metadata record stored on page 0 of the page file. The serialized
     * layout is strictly ordered and append-only across store versions:
     * read() must consume fields in exactly the order write() emits them, and
     * fields added in later versions are read defensively (EOFException is
     * expected when loading a store written by an older broker).
     */
    protected class Metadata {
        protected Page<Metadata> page;
        protected int state;
        protected BTreeIndex<String, StoredDestination> destinations;
        protected Location lastUpdate;
        protected Location firstInProgressTransactionLocation;
        protected Location producerSequenceIdTrackerLocation = null;
        protected Location ackMessageFileMapLocation = null;
        // transient: rebuilt from the journal locations above, never serialized here
        protected transient ActiveMQMessageAuditNoSync producerSequenceIdTracker = new ActiveMQMessageAuditNoSync();
        protected transient Map<Integer, Set<Integer>> ackMessageFileMap = new HashMap<Integer, Set<Integer>>();
        protected int version = VERSION;
        protected int openwireVersion = OpenWireFormat.DEFAULT_VERSION;

        /**
         * Reads the metadata record. Field order must mirror write(). Fields
         * introduced after version 1 are guarded with EOFException catches so
         * that stores created by older brokers still load (upgrade path).
         */
        public void read(DataInput is) throws IOException {
            state = is.readInt();
            destinations = new BTreeIndex<String, StoredDestination>(pageFile, is.readLong());
            if (is.readBoolean()) {
                lastUpdate = LocationMarshaller.INSTANCE.readPayload(is);
            } else {
                lastUpdate = null;
            }
            if (is.readBoolean()) {
                firstInProgressTransactionLocation = LocationMarshaller.INSTANCE.readPayload(is);
            } else {
                firstInProgressTransactionLocation = null;
            }
            try {
                if (is.readBoolean()) {
                    producerSequenceIdTrackerLocation = LocationMarshaller.INSTANCE.readPayload(is);
                } else {
                    producerSequenceIdTrackerLocation = null;
                }
            } catch (EOFException expectedOnUpgrade) {
                // record written by a pre-audit store version; leave location null
            }
            try {
                version = is.readInt();
            } catch (EOFException expectedOnUpgrade) {
                version = 1;
            }
            // Note the short-circuit: for version < 5 the boolean is NOT read,
            // because older records never wrote it.
            if (version >= 5 && is.readBoolean()) {
                ackMessageFileMapLocation = LocationMarshaller.INSTANCE.readPayload(is);
            } else {
                ackMessageFileMapLocation = null;
            }
            try {
                openwireVersion = is.readInt();
            } catch (EOFException expectedOnUpgrade) {
                openwireVersion = OpenWireFormat.DEFAULT_VERSION;
            }
            LOG.info("KahaDB is version " + version);
        }

        /**
         * Writes the metadata record. Always stamps the current VERSION
         * constant (not this.version), so rewriting the metadata effectively
         * upgrades the on-disk format.
         */
        public void write(DataOutput os) throws IOException {
            os.writeInt(state);
            os.writeLong(destinations.getPageId());

            if (lastUpdate != null) {
                os.writeBoolean(true);
                LocationMarshaller.INSTANCE.writePayload(lastUpdate, os);
            } else {
                os.writeBoolean(false);
            }

            if (firstInProgressTransactionLocation != null) {
                os.writeBoolean(true);
                LocationMarshaller.INSTANCE.writePayload(firstInProgressTransactionLocation, os);
            } else {
                os.writeBoolean(false);
            }

            if (producerSequenceIdTrackerLocation != null) {
                os.writeBoolean(true);
                LocationMarshaller.INSTANCE.writePayload(producerSequenceIdTrackerLocation, os);
            } else {
                os.writeBoolean(false);
            }
            os.writeInt(VERSION);
            if (ackMessageFileMapLocation != null) {
                os.writeBoolean(true);
                LocationMarshaller.INSTANCE.writePayload(ackMessageFileMapLocation, os);
            } else {
                os.writeBoolean(false);
            }
            os.writeInt(this.openwireVersion);
        }
    }

    /** Marshals the Metadata record in and out of page 0 of the page file. */
    class MetadataMarshaller extends VariableMarshaller<Metadata> {
        @Override
        public Metadata readPayload(DataInput dataIn) throws IOException {
            Metadata rc = createMetadata();
            rc.read(dataIn);
            return rc;
        }

        @Override
        public void writePayload(Metadata object, DataOutput dataOut) throws IOException {
            object.write(dataOut);
        }
    }

    protected PageFile pageFile;
    protected Journal journal;
    protected Metadata metadata = new Metadata();

    protected MetadataMarshaller metadataMarshaller = new MetadataMarshaller();

    protected boolean failIfDatabaseIsLocked;

    protected boolean deleteAllMessages;
    protected File directory = DEFAULT_DIRECTORY;
    protected File indexDirectory = null;
    protected Thread checkpointThread;
    protected boolean enableJournalDiskSyncs=true;
    protected boolean archiveDataLogs;
    protected File directoryArchive;
    protected AtomicLong journalSize = new AtomicLong(0);
    // checkpointInterval/cleanupInterval: 0 disables the periodic background
    // checkpoint/cleanup thread (see startCheckpoint()).
    long checkpointInterval = 5*1000;
    long cleanupInterval = 30*1000;
    int journalMaxFileLength = Journal.DEFAULT_MAX_FILE_LENGTH;
    int journalMaxWriteBatchSize = Journal.DEFAULT_MAX_WRITE_BATCH_SIZE;
    boolean enableIndexWriteAsync = false;
    int setIndexWriteBatchSize = PageFile.DEFAULT_WRITE_BATCH_SIZE;

    // Flipped by open()/close(); guards the checkpoint thread's run loop.
    protected AtomicBoolean opened = new AtomicBoolean();
    private boolean ignoreMissingJournalfiles = false;
    private int indexCacheSize = 10000;
    private boolean checkForCorruptJournalFiles = false;
    private boolean checksumJournalFiles = true;
    protected boolean forceRecoverIndex = false;
    private final Object checkpointThreadLock = new Object();
    private boolean rewriteOnRedelivery = false;
    private boolean archiveCorruptedIndex = false;
    private boolean useIndexLFRUEviction = false;
    private float indexLFUEvictionFactor = 0.2f;
    private boolean enableIndexDiskSyncs = true;
    private boolean enableIndexRecoveryFile = true;
    private boolean enableIndexPageCaching = true;
    // Writers (store()) hold the read side; checkpointUpdate holds the write
    // side so a checkpoint excludes concurrent journal appends.
    ReentrantReadWriteLock checkpointLock = new ReentrantReadWriteLock();

    @Override
    public void doStart() throws Exception {
        load();
    }

    @Override
    public void doStop(ServiceStopper stopper) throws Exception {
        unload();
    }

    /**
     * Loads (or creates) the page file and populates storedDestinations from
     * the destinations index. Called under the index write lock.
     */
    private void loadPageFile() throws IOException {
        this.indexLock.writeLock().lock();
        try {
            final PageFile pageFile = getPageFile();
            pageFile.load();
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    if (pageFile.getPageCount() == 0) {
                        // First time this is created.. Initialize the metadata
                        Page<Metadata> page = tx.allocate();
                        assert page.getPageId() == 0;
                        page.set(metadata);
                        metadata.page = page;
                        metadata.state = CLOSED_STATE;
                        metadata.destinations = new BTreeIndex<String, StoredDestination>(pageFile, tx.allocate().getPageId());

                        tx.store(metadata.page, metadataMarshaller, true);
                    } else {
                        // Existing store: metadata always lives on page 0.
                        Page<Metadata> page = tx.load(0, metadataMarshaller);
                        metadata = page.get();
                        metadata.page = page;
                    }
                    metadata.destinations.setKeyMarshaller(StringMarshaller.INSTANCE);
                    metadata.destinations.setValueMarshaller(new StoredDestinationMarshaller());
                    metadata.destinations.load(tx);
                }
            });
            // Load up all the destinations since we need to scan all the indexes to figure out which journal files can be deleted.
            // Perhaps we should just keep an index of file
            storedDestinations.clear();
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    for (Iterator<Entry<String, StoredDestination>> iterator = metadata.destinations.iterator(tx); iterator.hasNext();) {
                        Entry<String, StoredDestination> entry = iterator.next();
                        StoredDestination sd = loadStoredDestination(tx, entry.getKey(), entry.getValue().subscriptions!=null);
                        storedDestinations.put(entry.getKey(), sd);

                        if (checkForCorruptJournalFiles) {
                            // sanity check the index also
                            if (!entry.getValue().locationIndex.isEmpty(tx)) {
                                if (entry.getValue().orderIndex.nextMessageId <= 0) {
                                    throw new IOException("Detected uninitialized orderIndex nextMessageId with pending messages for " + entry.getKey());
                                }
                            }
                        }
                    }
                }
            });
            pageFile.flush();
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /**
     * Starts (or restarts, if it died) the background checkpoint/cleanup
     * daemon thread. A no-op when both intervals are configured to 0.
     */
    private void startCheckpoint() {
        if (checkpointInterval == 0 && cleanupInterval == 0) {
            LOG.info("periodic checkpoint/cleanup disabled, will ocurr on clean shutdown/restart");
            return;
        }
        synchronized (checkpointThreadLock) {
            boolean start = false;
            if (checkpointThread == null) {
                start = true;
            } else if (!checkpointThread.isAlive()) {
                // The previous thread died (e.g. on an IOException); recover it.
                start = true;
                LOG.info("KahaDB: Recovering checkpoint thread after death");
            }
            if (start) {
                checkpointThread = new Thread("ActiveMQ Journal Checkpoint Worker") {
                    @Override
                    public void run() {
                        try {
                            long lastCleanup = System.currentTimeMillis();
                            long lastCheckpoint = System.currentTimeMillis();
                            // Sleep for a short time so we can periodically check
                            // to see if we need to exit this thread.
                            long sleepTime = Math.min(checkpointInterval > 0 ? checkpointInterval : cleanupInterval, 500);
                            while (opened.get()) {
                                Thread.sleep(sleepTime);
                                long now = System.currentTimeMillis();
                                // A cleanup pass also checkpoints, so it resets both timers.
                                if( cleanupInterval > 0 && (now - lastCleanup >= cleanupInterval) ) {
                                    checkpointCleanup(true);
                                    lastCleanup = now;
                                    lastCheckpoint = now;
                                } else if( checkpointInterval > 0 && (now - lastCheckpoint >= checkpointInterval )) {
                                    checkpointCleanup(false);
                                    lastCheckpoint = now;
                                }
                            }
                        } catch (InterruptedException e) {
                            // Looks like someone really wants us to exit this thread...
                        } catch (IOException ioe) {
                            LOG.error("Checkpoint failed", ioe);
                            brokerService.handleIOException(ioe);
                        }
                    }
                };

                checkpointThread.setDaemon(true);
                checkpointThread.start();
            }
        }
    }

    /**
     * Starts the journal and loads the index; if the index cannot be loaded it
     * is archived or deleted and rebuilt from scratch by journal replay.
     * Idempotent: only the first call (CAS on opened) does the work.
     */
    public void open() throws IOException {
        if( opened.compareAndSet(false, true) ) {
            getJournal().start();
            try {
                loadPageFile();
            } catch (Throwable t) {
                LOG.warn("Index corrupted. Recovering the index through journal replay. Cause:" + t);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Index load failure", t);
                }
                // try to recover index
                try {
                    pageFile.unload();
                } catch (Exception ignore) {}
                if (archiveCorruptedIndex) {
                    pageFile.archive();
                } else {
                    pageFile.delete();
                }
                // Fresh metadata + null pageFile forces getPageFile() to build
                // a new, empty index that recover() will repopulate.
                metadata = createMetadata();
                pageFile = null;
                loadPageFile();
            }
            startCheckpoint();
            recover();
        }
    }

    /**
     * Full startup: optionally purges the store (deleteAllMessages), opens it,
     * and journals a trace marker recording the load time.
     */
    public void load() throws IOException {
        this.indexLock.writeLock().lock();
        // NOTE(review): mkdirs runs after lock() but outside the try, so a
        // failure here would leave the write lock held — confirm intended.
        IOHelper.mkdirs(directory);
        try {
            if (deleteAllMessages) {
                getJournal().start();
                getJournal().delete();
                getJournal().close();
                journal = null;
                getPageFile().delete();
                LOG.info("Persistence store purged.");
                deleteAllMessages = false;
            }

            open();
            store(new KahaTraceCommand().setMessage("LOADED " + new Date()));
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /**
     * Final checkpoint and shutdown of page file, journal and the checkpoint
     * thread. Idempotent via the CAS on opened; the thread's run loop exits
     * once opened becomes false, then we join it.
     */
    public void close() throws IOException, InterruptedException {
        if( opened.compareAndSet(true, false)) {
            checkpointLock.writeLock().lock();
            try {
                if (metadata.page != null) {
                    checkpointUpdate(true);
                }
                pageFile.unload();
                metadata = createMetadata();
            } finally {
                checkpointLock.writeLock().unlock();
            }
            journal.close();
            synchronized (checkpointThreadLock) {
                if (checkpointThread != null) {
                    checkpointThread.join();
                }
            }
        }
    }

    /**
     * Marks the store cleanly closed (CLOSED_STATE + first in-progress tx
     * location persisted in the metadata) and then closes it.
     */
    public void unload() throws IOException, InterruptedException {
        this.indexLock.writeLock().lock();
        try {
            if( pageFile != null && pageFile.isLoaded() ) {
                metadata.state = CLOSED_STATE;
                metadata.firstInProgressTransactionLocation = getInProgressTxLocationRange()[0];

                if (metadata.page != null) {
                    pageFile.tx().execute(new Transaction.Closure<IOException>() {
                        @Override
                        public void execute(Transaction tx) throws IOException {
                            tx.store(metadata.page, metadataMarshaller, true);
                        }
                    });
                }
            }
        }
        finally {
            this.indexLock.writeLock().unlock();
        }
        close();
    }

    // public for testing
    /**
     * Returns the [min, max] journal locations referenced by any in-flight or
     * prepared transaction; either slot is null when there are none. The min
     * bounds how far back journal replay must start after a restart.
     */
    @SuppressWarnings("rawtypes")
    public Location[] getInProgressTxLocationRange() {
        Location[] range = new Location[]{null, null};
        synchronized (inflightTransactions) {
            if (!inflightTransactions.isEmpty()) {
                for (List<Operation> ops : inflightTransactions.values()) {
                    if (!ops.isEmpty()) {
                        trackMaxAndMin(range, ops);
                    }
                }
            }
            if (!preparedTransactions.isEmpty()) {
                for (List<Operation> ops : preparedTransactions.values()) {
                    if (!ops.isEmpty()) {
                        trackMaxAndMin(range, ops);
                    }
                }
            }
        }
        return range;
    }

    /**
     * Widens range to include this operation list, using the first op as the
     * candidate minimum and the last op as the candidate maximum (ops are
     * appended in journal order, so first/last bracket the list).
     */
    @SuppressWarnings("rawtypes")
    private void trackMaxAndMin(Location[] range, List<Operation> ops) {
        Location t = ops.get(0).getLocation();
        if (range[0]==null || t.compareTo(range[0]) <= 0) {
            range[0] = t;
        }
        t = ops.get(ops.size() -1).getLocation();
        if (range[1]==null || t.compareTo(range[1]) >= 0) {
            range[1] = t;
        }
    }

    /**
     * Diagnostic summary of one transaction: its id, first journal location,
     * and per-destination add/remove counts. Used by getTransactions().
     */
    class TranInfo {
        TransactionId id;
        Location location;

        class opCount {
            int add;
            int remove;
        }
        HashMap<KahaDestination, opCount> destinationOpCount = new HashMap<KahaDestination, opCount>();

        /** Accumulates one add/remove operation into the per-destination counts. */
        @SuppressWarnings("rawtypes")
        public void track(Operation operation) {
            if (location == null ) {
                // first operation seen defines the transaction's start location
                location = operation.getLocation();
            }
            KahaDestination destination;
            boolean isAdd = false;
            if (operation instanceof AddOperation) {
                AddOperation add = (AddOperation) operation;
                destination = add.getCommand().getDestination();
                isAdd = true;
            } else {
                RemoveOperation removeOpperation = (RemoveOperation) operation;
                destination = removeOpperation.getCommand().getDestination();
            }
            opCount opCount = destinationOpCount.get(destination);
            if (opCount == null) {
                opCount = new opCount();
                destinationOpCount.put(destination, opCount);
            }
            if (isAdd) {
                opCount.add++;
            } else {
                opCount.remove++;
            }
        }

        @Override
        public String toString() {
            StringBuffer buffer = new StringBuffer();
            buffer.append(location).append(";").append(id).append(";\n");
            for (Entry<KahaDestination, opCount> op : destinationOpCount.entrySet()) {
                buffer.append(op.getKey()).append('+').append(op.getValue().add).append(',').append('-').append(op.getValue().remove).append(';');
            }
            return buffer.toString();
        }
    }

    /**
     * Human-readable dump of all in-flight and prepared transactions, one
     * TranInfo per transaction. Intended for diagnostics/JMX.
     */
    @SuppressWarnings("rawtypes")
    public String getTransactions() {

        ArrayList<TranInfo> infos = new ArrayList<TranInfo>();
        synchronized (inflightTransactions) {
            if (!inflightTransactions.isEmpty()) {
                for (Entry<TransactionId, List<Operation>> entry : inflightTransactions.entrySet()) {
                    TranInfo info = new TranInfo();
                    info.id = entry.getKey();
                    for (Operation operation : entry.getValue()) {
                        info.track(operation);
                    }
                    infos.add(info);
                }
            }
        }
        synchronized (preparedTransactions) {
            if (!preparedTransactions.isEmpty()) {
                for (Entry<TransactionId, List<Operation>> entry : preparedTransactions.entrySet()) {
                    TranInfo info = new TranInfo();
                    info.id = entry.getKey();
                    for (Operation operation : entry.getValue()) {
                        info.track(operation);
                    }
                    infos.add(info);
                }
            }
        }
        return infos.toString();
    }

    /**
     * Move all the messages that were in the journal into long term storage. We
     * just replay and do a checkpoint.
     *
     * @throws IOException
     * @throws IllegalStateException
     */
    private void recover() throws IllegalStateException, IOException {
        this.indexLock.writeLock().lock();
        try {

            long start = System.currentTimeMillis();
            // Replay must start early enough to rebuild every piece of
            // transient state, so take the minimum of the three positions.
            Location producerAuditPosition = recoverProducerAudit();
            Location ackMessageFileLocation = recoverAckMessageFileMap();
            Location lastIndoubtPosition = getRecoveryPosition();

            Location recoveryPosition = minimum(producerAuditPosition, ackMessageFileLocation);
            recoveryPosition = minimum(recoveryPosition, lastIndoubtPosition);

            if (recoveryPosition != null) {
                int redoCounter = 0;
                LOG.info("Recovering from the journal ...");
                while (recoveryPosition != null) {
                    JournalCommand<?> message = load(recoveryPosition);
                    metadata.lastUpdate = recoveryPosition;
                    process(message, recoveryPosition, lastIndoubtPosition);
                    redoCounter++;
                    recoveryPosition = journal.getNextLocation(recoveryPosition);
                    if (LOG.isInfoEnabled() && redoCounter % 100000 == 0) {
                        LOG.info("@" + recoveryPosition + ", " + redoCounter + " entries recovered ..");
                    }
                }
                if (LOG.isInfoEnabled()) {
                    long end = System.currentTimeMillis();
                    LOG.info("Recovery replayed " + redoCounter + " operations from the journal in " + ((end - start) / 1000.0f) + " seconds.");
                }
            }

            // We may have to undo some index updates.
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    recoverIndex(tx);
                }
            });

            // rollback any recovered inflight local transactions, and discard any inflight XA transactions.
            Set<TransactionId> toRollback = new HashSet<TransactionId>();
            Set<TransactionId> toDiscard = new HashSet<TransactionId>();
            synchronized (inflightTransactions) {
                for (Iterator<TransactionId> it = inflightTransactions.keySet().iterator(); it.hasNext(); ) {
                    TransactionId id = it.next();
                    if (id.isLocalTransaction()) {
                        toRollback.add(id);
                    } else {
                        toDiscard.add(id);
                    }
                }
                for (TransactionId tx: toRollback) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("rolling back recovered indoubt local transaction " + tx);
                    }
                    // Journal an explicit rollback so the outcome survives a
                    // subsequent restart.
                    store(new KahaRollbackCommand().setTransactionInfo(TransactionIdConversion.convertToLocal(tx)), false, null, null);
                }
                for (TransactionId tx: toDiscard) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("discarding recovered in-flight XA transaction " + tx);
                    }
                    inflightTransactions.remove(tx);
                }
            }

            synchronized (preparedTransactions) {
                // Prepared XA transactions are kept; the TM decides their fate.
                for (TransactionId txId : preparedTransactions.keySet()) {
                    LOG.warn("Recovered prepared XA TX: [{}]", txId);
                }
            }

        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    @SuppressWarnings("unused")
    private KahaTransactionInfo createLocalTransactionInfo(TransactionId tx) {
        return TransactionIdConversion.convertToLocal(tx);
    }

    /**
     * Returns the earlier of the two locations, treating null as "no
     * constraint" (so null is only returned when both arguments are null).
     */
    private Location minimum(Location producerAuditPosition,
            Location lastIndoubtPosition) {
        Location min = null;
        if (producerAuditPosition != null) {
            min = producerAuditPosition;
            if (lastIndoubtPosition != null && lastIndoubtPosition.compareTo(producerAuditPosition) < 0) {
                min = lastIndoubtPosition;
            }
        } else {
            min = lastIndoubtPosition;
        }
        return min;
    }

    /**
     * Restores the duplicate-producer audit from its journalled snapshot and
     * returns the journal position replay should resume from; with no (or an
     * unreadable) snapshot, returns the start of the journal for full replay.
     */
    private Location recoverProducerAudit() throws IOException {
        if (metadata.producerSequenceIdTrackerLocation != null) {
            KahaProducerAuditCommand audit = (KahaProducerAuditCommand) load(metadata.producerSequenceIdTrackerLocation);
            try {
                // Stream is backed by an in-memory protobuf buffer, so not
                // closing it presumably holds no OS resource — TODO confirm.
                ObjectInputStream objectIn = new ObjectInputStream(audit.getAudit().newInput());
                int maxNumProducers = getMaxFailoverProducersToTrack();
                int maxAuditDepth = getFailoverProducersAuditDepth();
                metadata.producerSequenceIdTracker = (ActiveMQMessageAuditNoSync) objectIn.readObject();
                metadata.producerSequenceIdTracker.setAuditDepth(maxAuditDepth);
                metadata.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(maxNumProducers);
                return journal.getNextLocation(metadata.producerSequenceIdTrackerLocation);
            } catch (Exception e) {
                LOG.warn("Cannot recover message audit", e);
                return journal.getNextLocation(null);
            }
        } else {
            // got no audit stored so got to recreate via replay from start of the journal
            return journal.getNextLocation(null);
        }
    }

    /**
     * Restores the ack-to-journal-file reference map from its journalled
     * snapshot; same fallback semantics as recoverProducerAudit().
     */
    @SuppressWarnings("unchecked")
    private Location recoverAckMessageFileMap() throws IOException {
        if (metadata.ackMessageFileMapLocation != null) {
            KahaAckMessageFileMapCommand audit = (KahaAckMessageFileMapCommand) load(metadata.ackMessageFileMapLocation);
            try {
                ObjectInputStream objectIn = new ObjectInputStream(audit.getAckMessageFileMap().newInput());
                metadata.ackMessageFileMap = (Map<Integer, Set<Integer>>) objectIn.readObject();
                return journal.getNextLocation(metadata.ackMessageFileMapLocation);
            } catch (Exception e) {
                LOG.warn("Cannot recover ackMessageFileMap", e);
                return journal.getNextLocation(null);
            }
        } else {
            // got no ackMessageFileMap stored so got to recreate via replay from start of the journal
            return journal.getNextLocation(null);
        }
    }

    /**
     * Reconciles the index against the journal: drops index entries whose
     * journal records are past the last append point or live in missing or
     * corrupt journal files.
     */
    protected void recoverIndex(Transaction tx) throws IOException {
        long start = System.currentTimeMillis();
        // It is possible index updates got applied before the journal updates..
        // in that case we need to remove references to messages that are not in the journal
        final Location lastAppendLocation = journal.getLastAppendLocation();
        long undoCounter=0;

        // Go through all the destinations to see if they have messages past the lastAppendLocation
        for (StoredDestination sd : storedDestinations.values()) {

            final ArrayList<Long> matches = new ArrayList<Long>();
            // Find all the Locations that are >= than the last Append Location.
            sd.locationIndex.visit(tx, new BTreeVisitor.GTEVisitor<Location, Long>(lastAppendLocation) {
                @Override
                protected void matched(Location key, Long value) {
                    matches.add(value);
                }
            });

            for (Long sequenceId : matches) {
                // Remove the message from all three correlated indexes and
                // roll its id back out of the duplicate-producer audit.
                MessageKeys keys = sd.orderIndex.remove(tx, sequenceId);
                sd.locationIndex.remove(tx, keys.location);
                sd.messageIdIndex.remove(tx, keys.messageId);
                metadata.producerSequenceIdTracker.rollback(keys.messageId);
                undoCounter++;
                // TODO: do we need to modify the ack positions for the pub sub case?
            }
        }

        if( undoCounter > 0 ) {
            // The rolledback operations are basically in flight journal writes. To avoid getting
            // these the end user should do sync writes to the journal.
            if (LOG.isInfoEnabled()) {
                long end = System.currentTimeMillis();
                LOG.info("Rolled back " + undoCounter + " messages from the index in " + ((end - start) / 1000.0f) + " seconds.");
            }
        }

        undoCounter = 0;
        start = System.currentTimeMillis();

        // Lets be extra paranoid here and verify that all the datafiles being referenced
        // by the indexes still exists.

        final SequenceSet ss = new SequenceSet();
        for (StoredDestination sd : storedDestinations.values()) {
            // Use a visitor to cut down the number of pages that we load
            sd.locationIndex.visit(tx, new BTreeVisitor<Location, Long>() {
                int last=-1;

                @Override
                public boolean isInterestedInKeysBetween(Location first, Location second) {
                    // Skip key ranges whose data file ids are already recorded.
                    if( first==null ) {
                        return !ss.contains(0, second.getDataFileId());
                    } else if( second==null ) {
                        return true;
                    } else {
                        return !ss.contains(first.getDataFileId(), second.getDataFileId());
                    }
                }

                @Override
                public void visit(List<Location> keys, List<Long> values) {
                    for (Location l : keys) {
                        int fileId = l.getDataFileId();
                        if( last != fileId ) {
                            ss.add(fileId);
                            last = fileId;
                        }
                    }
                }

            });
        }
        // ss now holds every data file id the indexes reference; any id not
        // present in the journal's file map is a missing file.
        HashSet<Integer> missingJournalFiles = new HashSet<Integer>();
        while (!ss.isEmpty()) {
            missingJournalFiles.add((int) ss.removeFirst());
        }
        missingJournalFiles.removeAll(journal.getFileMap().keySet());

        if (!missingJournalFiles.isEmpty()) {
            if (LOG.isInfoEnabled()) {
                LOG.info("Some journal files are missing: " + missingJournalFiles);
            }
        }

        // Build location-range predicates covering every missing file...
        ArrayList<BTreeVisitor.Predicate<Location>> missingPredicates = new ArrayList<BTreeVisitor.Predicate<Location>>();
        for (Integer missing : missingJournalFiles) {
            missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(missing, 0), new Location(missing + 1, 0)));
        }

        if (checkForCorruptJournalFiles) {
            // ...plus, optionally, ranges past each file's valid length and
            // any blocks the journal flagged as corrupted.
            Collection<DataFile> dataFiles = journal.getFileMap().values();
            for (DataFile dataFile : dataFiles) {
                int id = dataFile.getDataFileId();
                missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(id, dataFile.getLength()), new Location(id + 1, 0)));
                Sequence seq = dataFile.getCorruptedBlocks().getHead();
                while (seq != null) {
                    missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(id, (int) seq.getFirst()), new Location(id, (int) seq.getLast() + 1)));
                    seq = seq.getNext();
                }
            }
        }

        if (!missingPredicates.isEmpty()) {
            for (StoredDestination sd : storedDestinations.values()) {

                final ArrayList<Long> matches = new ArrayList<Long>();
                sd.locationIndex.visit(tx, new BTreeVisitor.OrVisitor<Location, Long>(missingPredicates) {
                    @Override
                    protected void matched(Location key, Long value) {
                        matches.add(value);
                    }
                });

                // If somes message references are affected by the missing data files...
                if (!matches.isEmpty()) {

                    // We either 'gracefully' recover dropping the missing messages or
                    // we error out.
                    if( ignoreMissingJournalfiles ) {
                        // Update the index to remove the references to the missing data
                        for (Long sequenceId : matches) {
                            MessageKeys keys = sd.orderIndex.remove(tx, sequenceId);
                            sd.locationIndex.remove(tx, keys.location);
                            sd.messageIdIndex.remove(tx, keys.messageId);
                            undoCounter++;
                            // TODO: do we need to modify the ack positions for the pub sub case?
                        }
                    } else {
                        throw new IOException("Detected missing/corrupt journal files. "+matches.size()+" messages affected.");
                    }
                }
            }
        }

        if( undoCounter > 0 ) {
            // The rolledback operations are basically in flight journal writes. To avoid getting these the end user
            // should do sync writes to the journal.
            if (LOG.isInfoEnabled()) {
                long end = System.currentTimeMillis();
                LOG.info("Detected missing/corrupt journal files.  Dropped " + undoCounter + " messages from the index in " + ((end - start) / 1000.0f) + " seconds.");
            }
        }
    }

    // Cursor state for incrementalRecover(); used by slave/replica style
    // callers to replay the journal a piece at a time.
    private Location nextRecoveryPosition;
    private Location lastRecoveryPosition;

    /**
     * Replays any journal entries appended since the last call (or since the
     * normal recovery position on the first call) into the index.
     */
    public void incrementalRecover() throws IOException {
        this.indexLock.writeLock().lock();
        try {
            if( nextRecoveryPosition == null ) {
                if( lastRecoveryPosition==null ) {
                    nextRecoveryPosition = getRecoveryPosition();
                } else {
                    nextRecoveryPosition = journal.getNextLocation(lastRecoveryPosition);
                }
            }
            while (nextRecoveryPosition != null) {
                lastRecoveryPosition = nextRecoveryPosition;
                metadata.lastUpdate = lastRecoveryPosition;
                JournalCommand<?> message = load(lastRecoveryPosition);
                process(message, lastRecoveryPosition, (IndexAware) null);
                nextRecoveryPosition = journal.getNextLocation(lastRecoveryPosition);
            }
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /** Journal location of the last update applied to the index, or null. */
    public Location getLastUpdatePosition() throws IOException {
        return metadata.lastUpdate;
    }

    /**
     * Picks where journal replay should begin: the first in-progress
     * transaction, else just after the index's last recorded update, else the
     * very start of the journal (always the start when forceRecoverIndex).
     */
    private Location getRecoveryPosition() throws IOException {

        if (!this.forceRecoverIndex) {

            // If we need to recover the transactions..
            if (metadata.firstInProgressTransactionLocation != null) {
                return metadata.firstInProgressTransactionLocation;
            }

            // Perhaps there were no transactions...
            if( metadata.lastUpdate!=null) {
                // Start replay at the record after the last one recorded in the index file.
                return journal.getNextLocation(metadata.lastUpdate);
            }
        }
        // This loads the first position.
        return journal.getNextLocation(null);
    }

    /**
     * Invoked by the periodic checkpoint task: runs a checkpoint (and
     * optionally a cleanup/GC pass) if the store is still open, logging when
     * the pass exceeds LOG_SLOW_ACCESS_TIME.
     */
    protected void checkpointCleanup(final boolean cleanup) throws IOException {
        long start;
        this.indexLock.writeLock().lock();
        try {
            start = System.currentTimeMillis();
            if( !opened.get() ) {
                // Store already closed; nothing to checkpoint.
                return;
            }
        } finally {
            this.indexLock.writeLock().unlock();
        }
        checkpointUpdate(cleanup);
        long end = System.currentTimeMillis();
        if (LOG_SLOW_ACCESS_TIME > 0 && end - start > LOG_SLOW_ACCESS_TIME) {
            if (LOG.isInfoEnabled()) {
                LOG.info("Slow KahaDB access: cleanup took " + (end - start));
            }
        }
    }

    /**
     * Serializes a journal command to its framed wire format, prefixed with a
     * single type byte so it can be decoded again by {@link #load(Location)}.
     */
    public ByteSequence toByteSequence(JournalCommand<?> data) throws IOException {
        int size = data.serializedSizeFramed();
        DataByteArrayOutputStream os = new DataByteArrayOutputStream(size + 1);
        os.writeByte(data.type().getNumber());
        data.writeFramed(os);
        return os.toByteSequence();
    }

    // /////////////////////////////////////////////////////////////////
    // Methods call by the broker to update and query the store.
    // /////////////////////////////////////////////////////////////////

    /** Stores a command asynchronously with no callbacks. */
    public Location store(JournalCommand<?> data) throws IOException {
        return store(data, false, null,null);
    }

    /** Stores a command, notifying the callback once the journal write completes. */
    public Location store(JournalCommand<?> data, Runnable onJournalStoreComplete) throws IOException {
        return store(data, false, null, null, onJournalStoreComplete);
    }

    public Location store(JournalCommand<?> data, boolean sync, IndexAware before,Runnable after) throws IOException {
        return store(data, sync, before, after, null);
    }

    /**
     * All updates are funneled through this method. The updates are converted
     * to a JournalMessage which is logged to the journal and then the data from
     * the JournalMessage is used to update the index just like it would be done
     * during a recovery process.
     */
    public Location store(JournalCommand<?> data, boolean sync, IndexAware before, Runnable after, Runnable onJournalStoreComplete) throws IOException {
        try {
            ByteSequence sequence = toByteSequence(data);

            Location location;
            // Hold the checkpoint read lock so a concurrent checkpoint cannot
            // run between the journal append and the index update.
            checkpointLock.readLock().lock();
            try {

                long start = System.currentTimeMillis();
                location = onJournalStoreComplete == null ? journal.write(sequence, sync) : journal.write(sequence, onJournalStoreComplete) ;
                long start2 = System.currentTimeMillis();
                // Apply the same index update that recovery replay would apply.
                process(data, location, before);

                long end = System.currentTimeMillis();
                if( LOG_SLOW_ACCESS_TIME>0 && end-start > LOG_SLOW_ACCESS_TIME) {
                    if (LOG.isInfoEnabled()) {
                        LOG.info("Slow KahaDB access: Journal append took: "+(start2-start)+" ms, Index update took "+(end-start2)+" ms");
                    }
                }

            } finally{
                checkpointLock.readLock().unlock();
            }
            if (after != null) {
                after.run();
            }

            // Restart the checkpoint thread if it died while the store is open.
            if (checkpointThread != null && !checkpointThread.isAlive() && opened.get()) {
                startCheckpoint();
            }
            return location;
        } catch (IOException ioe) {
            // Delegate to the broker's IO exception handling (may stop the broker).
            LOG.error("KahaDB failed to store to Journal", ioe);
            brokerService.handleIOException(ioe);
            throw ioe;
        }
    }

    /**
     * Loads a previously stored JournalMessage
     *
     * @param location the journal location returned by {@link #store}
     * @return the decoded journal command
     * @throws IOException if the record cannot be read or its type byte is unknown
     */
    public JournalCommand<?> load(Location location) throws IOException {
        long start = System.currentTimeMillis();
        ByteSequence data = journal.read(location);
        long end = System.currentTimeMillis();
        if( LOG_SLOW_ACCESS_TIME>0 && end-start > LOG_SLOW_ACCESS_TIME) {
            if (LOG.isInfoEnabled()) {
                LOG.info("Slow KahaDB access: Journal read took: "+(end-start)+" ms");
            }
        }
        DataByteArrayInputStream is = new DataByteArrayInputStream(data);
        byte readByte = is.readByte();
        KahaEntryType type = KahaEntryType.valueOf(readByte);
        if( type == null ) {
            try {
                is.close();
            } catch (IOException e) {}
            throw new IOException("Could not load journal record. Invalid location: "+location);
        }
        JournalCommand<?> message = (JournalCommand<?>)type.createMessage();
        message.mergeFramed(is);
        return message;
    }

    /**
     * do minimal recovery till we reach the last inDoubtLocation
     * @param data
     * @param location
     * @param inDoubtlocation
     * @throws IOException
     */
    void process(JournalCommand<?> data, final Location location, final Location inDoubtlocation) throws IOException {
        if (inDoubtlocation != null && location.compareTo(inDoubtlocation) >= 0) {
            if (data instanceof KahaSubscriptionCommand) {
                KahaSubscriptionCommand kahaSubscriptionCommand = (KahaSubscriptionCommand)data;
                if (kahaSubscriptionCommand.hasSubscriptionInfo()) {
                    // needs to be processed via activate and will be replayed on reconnect
                    LOG.debug("ignoring add sub command during recovery replay:" + data);
                    return;
                }
            }
            process(data, location, (IndexAware) null);
        } else {
            // just recover producer audit
            data.visit(new Visitor() {
                @Override
                public void visit(KahaAddMessageCommand command) throws IOException {
                    metadata.producerSequenceIdTracker.isDuplicate(command.getMessageId());
                }
            });
        }
    }

    // /////////////////////////////////////////////////////////////////
    // Journaled record processing methods. Once the record is journaled,
    // these methods handle applying the index updates.
    // These may be called
    // from the recovery method too so they need to be idempotent
    // /////////////////////////////////////////////////////////////////

    /**
     * Dispatches a journaled command to the type-specific process(...) method
     * that applies its index update.
     */
    void process(JournalCommand<?> data, final Location location, final IndexAware onSequenceAssignedCallback) throws IOException {
        data.visit(new Visitor() {
            @Override
            public void visit(KahaAddMessageCommand command) throws IOException {
                process(command, location, onSequenceAssignedCallback);
            }

            @Override
            public void visit(KahaRemoveMessageCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaPrepareCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaCommitCommand command) throws IOException {
                process(command, location, onSequenceAssignedCallback);
            }

            @Override
            public void visit(KahaRollbackCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaRemoveDestinationCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaSubscriptionCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaProducerAuditCommand command) throws IOException {
                // Audit snapshots only advance the last-update marker.
                processLocation(location);
            }

            @Override
            public void visit(KahaAckMessageFileMapCommand command) throws IOException {
                // Ack file-map snapshots only advance the last-update marker.
                processLocation(location);
            }

            @Override
            public void visit(KahaTraceCommand command) {
                processLocation(location);
            }

            @Override
            public void visit(KahaUpdateMessageCommand command) throws IOException {
                process(command, location);
            }
        });
    }

    /**
     * Applies an add-message command. Transactional adds are parked on the
     * in-flight transaction until commit; non-transactional adds update the
     * index immediately under the index write lock.
     */
    @SuppressWarnings("rawtypes")
    protected void process(final KahaAddMessageCommand command, final Location
            location, final IndexAware runWithIndexLock) throws IOException {
        if (command.hasTransactionInfo()) {
            // Transactional add: defer the index update until commit.
            List<Operation> inflightTx = getInflightTx(command.getTransactionInfo());
            inflightTx.add(new AddOperation(command, location, runWithIndexLock));
        } else {
            this.indexLock.writeLock().lock();
            try {
                pageFile.tx().execute(new Transaction.Closure<IOException>() {
                    @Override
                    public void execute(Transaction tx) throws IOException {
                        long assignedIndex = updateIndex(tx, command, location);
                        if (runWithIndexLock != null) {
                            // Report the assigned sequence while still holding the index lock.
                            runWithIndexLock.sequenceAssignedWithIndexLocked(assignedIndex);
                        }
                    }
                });

            } finally {
                this.indexLock.writeLock().unlock();
            }
        }
    }

    /** Applies a message-update command to the index under the index write lock. */
    @SuppressWarnings("rawtypes")
    protected void process(final KahaUpdateMessageCommand command, final Location location) throws IOException {
        this.indexLock.writeLock().lock();
        try {
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    updateIndex(tx, command, location);
                }
            });
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /** Applies a remove (ack); transactional removes are deferred until commit. */
    @SuppressWarnings("rawtypes")
    protected void process(final KahaRemoveMessageCommand command, final Location location) throws IOException {
        if (command.hasTransactionInfo()) {
            List<Operation> inflightTx = getInflightTx(command.getTransactionInfo());
            inflightTx.add(new RemoveOperation(command, location));
        } else {
            this.indexLock.writeLock().lock();
            try {
                pageFile.tx().execute(new Transaction.Closure<IOException>() {
                    @Override
                    public void execute(Transaction tx) throws IOException {
                        updateIndex(tx, command, location);
                    }
                });
            } finally {
                this.indexLock.writeLock().unlock();
            }
        }
    }

    /** Removes a destination and all of its indexes under the index write lock. */
    protected void process(final KahaRemoveDestinationCommand command, final Location location) throws IOException {
        this.indexLock.writeLock().lock();
        try {
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    updateIndex(tx, command, location);
                }
            });
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /** Applies a subscription add/remove under the index write lock. */
    protected void process(final KahaSubscriptionCommand command, final Location location) throws IOException {
        this.indexLock.writeLock().lock();
        try {
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    updateIndex(tx, command, location);
                }
            });
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /** Records the location as the most recent index-affecting update. */
    protected void processLocation(final Location location) {
        this.indexLock.writeLock().lock();
        try {
            metadata.lastUpdate = location;
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /**
     * Commits a transaction: replays all of its deferred add/remove operations
     * against the index in a single page-file transaction.
     */
    @SuppressWarnings("rawtypes")
    protected void process(KahaCommitCommand command, final Location location, final IndexAware before) throws IOException {
        TransactionId key = TransactionIdConversion.convert(command.getTransactionInfo());
        List<Operation> inflightTx;
        synchronized (inflightTransactions) {
            inflightTx = inflightTransactions.remove(key);
            if (inflightTx == null) {
                inflightTx = preparedTransactions.remove(key);
            }
        }
        if (inflightTx == null) {
            // only non persistent messages in this tx
            if (before != null) {
                before.sequenceAssignedWithIndexLocked(-1);
            }
            return;
        }

        final List<Operation> messagingTx = inflightTx;
        indexLock.writeLock().lock();
        try {
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    for (Operation op : messagingTx) {
                        op.execute(tx);
                    }
                }
            });
            metadata.lastUpdate = location;
        } finally {
            indexLock.writeLock().unlock();
        }
    }

    /**
     * Moves a transaction from the in-flight map to the prepared map so a
     * later commit or rollback can still find it.
     */
    @SuppressWarnings("rawtypes")
    protected void process(KahaPrepareCommand command, Location location) {
        TransactionId key = TransactionIdConversion.convert(command.getTransactionInfo());
        synchronized (inflightTransactions) {
            List<Operation> tx = inflightTransactions.remove(key);
            if (tx != null) {
                preparedTransactions.put(key, tx);
            }
        }
    }

    /**
     * Rolls back a transaction by discarding its deferred operations; nothing
     * was applied to the index yet, so dropping them is sufficient.
     */
    @SuppressWarnings("rawtypes")
    protected void process(KahaRollbackCommand command, Location location) throws IOException {
        TransactionId key = TransactionIdConversion.convert(command.getTransactionInfo());
        List<Operation> updates = null;
        synchronized (inflightTransactions) {
            updates = inflightTransactions.remove(key);
            if (updates == null) {
                updates = preparedTransactions.remove(key);
            }
        }
    }

    // /////////////////////////////////////////////////////////////////
    // These methods do the actual index updates.
    // /////////////////////////////////////////////////////////////////

    // Guards all index reads/writes; shared by store callers and the checkpoint thread.
    protected final ReentrantReadWriteLock indexLock = new ReentrantReadWriteLock();
    // Journal data files currently being replicated; excluded from GC.
    private final HashSet<Integer> journalFilesBeingReplicated = new HashSet<Integer>();

    /**
     * Adds a message to the destination's indexes.
     *
     * @return the assigned order-index sequence id, or -1 when the message was
     *         skipped (topic with no subscribers) or rejected as a duplicate.
     */
    long updateIndex(Transaction tx, KahaAddMessageCommand command, Location location) throws IOException {
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);

        // Skip adding the message to the index if this is a topic and there are
        // no subscriptions.
        if (sd.subscriptions != null && sd.subscriptions.isEmpty(tx)) {
            return -1;
        }

        // Add the message.
        int priority = command.getPrioritySupported() ? command.getPriority() : javax.jms.Message.DEFAULT_PRIORITY;
        long id = sd.orderIndex.getNextMessageId(priority);
        Long previous = sd.locationIndex.put(tx, location, id);
        if (previous == null) {
            previous = sd.messageIdIndex.put(tx, command.getMessageId(), id);
            if (previous == null) {
                sd.orderIndex.put(tx, priority, id, new MessageKeys(command.getMessageId(), location));
                if (sd.subscriptions != null && !sd.subscriptions.isEmpty(tx)) {
                    addAckLocationForNewMessage(tx, sd, id);
                }
                metadata.lastUpdate = location;
            } else {
                // If the message ID is indexed, then the broker asked us to store a duplicate before the message was dispatched and acked, we ignore this add attempt
                LOG.warn("Duplicate message add attempt rejected. Destination: {}://{}, Message id: {}", command.getDestination().getType(), command.getDestination().getName(), command.getMessageId());
                sd.messageIdIndex.put(tx, command.getMessageId(), previous);
                sd.locationIndex.remove(tx, location);
                id = -1;
            }
        } else {
            // restore the previous value.. Looks like this was a redo of a previously
            // added message. We don't want to assign it a new id as the other indexes would
            // be wrong..
            sd.locationIndex.put(tx, location, previous);
            metadata.lastUpdate = location;
        }
        // record this id in any event, initial send or recovery
        metadata.producerSequenceIdTracker.isDuplicate(command.getMessageId());
        return id;
    }

    /** Marks a sequence id as having an in-flight (not yet indexed) add. */
    void trackPendingAdd(KahaDestination destination, Long seq) {
        StoredDestination sd = storedDestinations.get(key(destination));
        if (sd != null) {
            sd.trackPendingAdd(seq);
        }
    }

    /** Clears the in-flight marker once the add has been indexed. */
    void trackPendingAddComplete(KahaDestination destination, Long seq) {
        StoredDestination sd = storedDestinations.get(key(destination));
        if (sd != null) {
            sd.trackPendingAddComplete(seq);
        }
    }

    /**
     * Replaces an existing message in place (same sequence id, new journal
     * location); rejects the update if the message id is not indexed.
     */
    void updateIndex(Transaction tx, KahaUpdateMessageCommand updateMessageCommand, Location location) throws IOException {
        KahaAddMessageCommand command = updateMessageCommand.getMessage();
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);

        Long id = sd.messageIdIndex.get(tx, command.getMessageId());
        if (id != null) {
            MessageKeys previousKeys = sd.orderIndex.put(
                tx,
                command.getPrioritySupported() ? command.getPriority() : javax.jms.Message.DEFAULT_PRIORITY,
                id,
                new MessageKeys(command.getMessageId(), location)
            );
            sd.locationIndex.put(tx, location, id);
            if(previousKeys != null) {
                // Drop the stale location entry for the replaced copy.
                sd.locationIndex.remove(tx, previousKeys.location);
            }
            metadata.lastUpdate = location;
        } else {
            LOG.warn("Non existent message update attempt rejected. Destination: {}://{}, Message id: {}", command.getDestination().getType(), command.getDestination().getName(), command.getMessageId());
        }
    }

    /**
     * Removes (acks) a message. For queues the message is deleted from all
     * indexes immediately; for topics the subscription's last-ack is advanced
     * and the message is deleted once no subscription still references it.
     */
    void updateIndex(Transaction tx, KahaRemoveMessageCommand command, Location ackLocation) throws IOException {
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
        if (!command.hasSubscriptionKey()) {

            // In the queue case we just remove the message from the index..
            Long sequenceId = sd.messageIdIndex.remove(tx, command.getMessageId());
            if (sequenceId != null) {
                MessageKeys keys = sd.orderIndex.remove(tx, sequenceId);
                if (keys != null) {
                    sd.locationIndex.remove(tx, keys.location);
                    recordAckMessageReferenceLocation(ackLocation, keys.location);
                    metadata.lastUpdate = ackLocation;
                } else if (LOG.isDebugEnabled()) {
                    LOG.debug("message not found in order index: " + sequenceId + " for: " + command.getMessageId());
                }
            } else if (LOG.isDebugEnabled()) {
                LOG.debug("message not found in sequence id index: " + command.getMessageId());
            }
        } else {
            // In the topic case we need remove the message once it's been acked
            // by all the subs
            Long sequence = sd.messageIdIndex.get(tx, command.getMessageId());

            // Make sure it's a valid message id...
            if (sequence != null) {
                String subscriptionKey = command.getSubscriptionKey();
                if (command.getAck() != UNMATCHED) {
                    sd.orderIndex.get(tx, sequence);
                    byte priority = sd.orderIndex.lastGetPriority();
                    sd.subscriptionAcks.put(tx, subscriptionKey, new LastAck(sequence, priority));
                }

                MessageKeys keys = sd.orderIndex.get(tx, sequence);
                if (keys != null) {
                    recordAckMessageReferenceLocation(ackLocation, keys.location);
                }
                // The following method handles deleting un-referenced messages.
                removeAckLocation(tx, sd, subscriptionKey, sequence);
                metadata.lastUpdate = ackLocation;
            } else if (LOG.isDebugEnabled()) {
                LOG.debug("no message sequence exists for id: " + command.getMessageId() + " and sub: " + command.getSubscriptionKey());
            }

        }
    }

    /**
     * Records that the journal file containing an ack references the journal
     * file containing the acked message, so cleanup will not GC a message file
     * that a retained ack file still points at.
     */
    private void recordAckMessageReferenceLocation(Location ackLocation, Location messageLocation) {
        Set<Integer> referenceFileIds = metadata.ackMessageFileMap.get(Integer.valueOf(ackLocation.getDataFileId()));
        if (referenceFileIds == null) {
            referenceFileIds = new HashSet<Integer>();
            referenceFileIds.add(messageLocation.getDataFileId());
            metadata.ackMessageFileMap.put(ackLocation.getDataFileId(), referenceFileIds);
        } else {
            Integer id = Integer.valueOf(messageLocation.getDataFileId());
            if (!referenceFileIds.contains(id)) {
                referenceFileIds.add(id);
            }
        }
    }

    /**
     * Deletes a destination: clears and frees every index page it owns, then
     * removes it from the destination catalog.
     */
    void updateIndex(Transaction tx, KahaRemoveDestinationCommand command, Location location) throws IOException {
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
        sd.orderIndex.remove(tx);

        sd.locationIndex.clear(tx);
        sd.locationIndex.unload(tx);
        tx.free(sd.locationIndex.getPageId());

        sd.messageIdIndex.clear(tx);
        sd.messageIdIndex.unload(tx);
        tx.free(sd.messageIdIndex.getPageId());

        if (sd.subscriptions != null) {
            // Topic: also tear down the subscription-related indexes.
            sd.subscriptions.clear(tx);
            sd.subscriptions.unload(tx);
            tx.free(sd.subscriptions.getPageId());

            sd.subscriptionAcks.clear(tx);
            sd.subscriptionAcks.unload(tx);
            tx.free(sd.subscriptionAcks.getPageId());

            sd.ackPositions.clear(tx);
            sd.ackPositions.unload(tx);
            tx.free(sd.ackPositions.getHeadPageId());

            sd.subLocations.clear(tx);
            sd.subLocations.unload(tx);
            tx.free(sd.subLocations.getHeadPageId());
        }

        String key = key(command.getDestination());
        storedDestinations.remove(key);
        metadata.destinations.remove(tx, key);
    }

    /**
     * Creates or destroys a durable subscription entry for a topic destination.
     */
    void updateIndex(Transaction tx, KahaSubscriptionCommand command, Location location) throws IOException {
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
        final String subscriptionKey = command.getSubscriptionKey();

        // If set then we are creating it.. otherwise we are destroying the sub
        if (command.hasSubscriptionInfo()) {
            sd.subscriptions.put(tx, subscriptionKey, command);
            sd.subLocations.put(tx, subscriptionKey, location);
            long ackLocation=NOT_ACKED;
            if (!command.getRetroactive()) {
                // Non-retroactive subs start after the most recently assigned id.
                ackLocation = sd.orderIndex.nextMessageId-1;
            } else {
                addAckLocationForRetroactiveSub(tx, sd, subscriptionKey);
            }
            sd.subscriptionAcks.put(tx, subscriptionKey, new LastAck(ackLocation));
            sd.subscriptionCache.add(subscriptionKey);
        } else {
            // delete the sub...
            sd.subscriptions.remove(tx, subscriptionKey);
            sd.subLocations.remove(tx, subscriptionKey);
            sd.subscriptionAcks.remove(tx, subscriptionKey);
            sd.subscriptionCache.remove(subscriptionKey);
            removeAckLocationsForSub(tx, sd, subscriptionKey);

            if (sd.subscriptions.isEmpty(tx)) {
                // No more subscribers: drop all retained topic messages.
                sd.messageIdIndex.clear(tx);
                sd.locationIndex.clear(tx);
                sd.orderIndex.clear(tx);
            }
        }
    }

    /**
     * Runs a checkpoint inside a page-file transaction while holding the
     * checkpoint write lock (blocking stores) and the index write lock.
     */
    private void checkpointUpdate(final boolean cleanup) throws IOException {
        checkpointLock.writeLock().lock();
        try {
            this.indexLock.writeLock().lock();
            try {
                pageFile.tx().execute(new Transaction.Closure<IOException>() {
                    @Override
                    public void execute(Transaction tx) throws IOException {
                        checkpointUpdate(tx, cleanup);
                    }
                });
            } finally {
                this.indexLock.writeLock().unlock();
            }

        } finally {
            checkpointLock.writeLock().unlock();
        }
    }

    /**
     * Flushes metadata to the page file and, when cleanup is requested,
     * computes and removes journal data files that are no longer referenced.
     *
     * @param tx the page-file transaction to update the metadata page in
     * @throws IOException on page file or journal failure
     */
    void checkpointUpdate(Transaction tx, boolean cleanup)
            throws IOException {
        LOG.debug("Checkpoint started.");

        // reflect last update exclusive of current checkpoint
        Location lastUpdate = metadata.lastUpdate;

        metadata.state = OPEN_STATE;
        metadata.producerSequenceIdTrackerLocation = checkpointProducerAudit();
        metadata.ackMessageFileMapLocation = checkpointAckMessageFileMap();
        Location[] inProgressTxRange = getInProgressTxLocationRange();
        metadata.firstInProgressTransactionLocation = inProgressTxRange[0];
        tx.store(metadata.page, metadataMarshaller, true);
        pageFile.flush();

        if( cleanup ) {

            // Start by assuming every journal file is a GC candidate, then
            // progressively remove files that are still referenced.
            final TreeSet<Integer> completeFileSet = new TreeSet<Integer>(journal.getFileMap().keySet());
            final TreeSet<Integer> gcCandidateSet = new TreeSet<Integer>(completeFileSet);

            if (LOG.isTraceEnabled()) {
                LOG.trace("Last update: " + lastUpdate + ", full gc candidates set: " + gcCandidateSet);
            }

            if (lastUpdate != null) {
                gcCandidateSet.remove(lastUpdate.getDataFileId());
            }

            // Don't GC files under replication
            if( journalFilesBeingReplicated!=null ) {
                gcCandidateSet.removeAll(journalFilesBeingReplicated);
            }

            if (metadata.producerSequenceIdTrackerLocation != null) {
                int dataFileId = metadata.producerSequenceIdTrackerLocation.getDataFileId();
                if (gcCandidateSet.contains(dataFileId) && gcCandidateSet.first() == dataFileId) {
                    // rewrite so we don't prevent gc
                    metadata.producerSequenceIdTracker.setModified(true);
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("rewriting producerSequenceIdTracker:" + metadata.producerSequenceIdTrackerLocation);
                    }
                }
                gcCandidateSet.remove(dataFileId);
                if (LOG.isTraceEnabled()) {
                    LOG.trace("gc candidates after producerSequenceIdTrackerLocation:" + dataFileId + ", " + gcCandidateSet);
                }
            }

            if (metadata.ackMessageFileMapLocation != null) {
                int dataFileId = metadata.ackMessageFileMapLocation.getDataFileId();
                gcCandidateSet.remove(dataFileId);
                if (LOG.isTraceEnabled()) {
                    LOG.trace("gc candidates after ackMessageFileMapLocation:" + dataFileId + ", " + gcCandidateSet);
                }
            }

            // Don't GC files referenced by in-progress tx
            if (inProgressTxRange[0] != null) {
                for (int pendingTx=inProgressTxRange[0].getDataFileId(); pendingTx <= inProgressTxRange[1].getDataFileId(); pendingTx++) {
                    gcCandidateSet.remove(pendingTx);
                }
            }
            if (LOG.isTraceEnabled()) {
                LOG.trace("gc candidates after tx range:" + Arrays.asList(inProgressTxRange) + ", " + gcCandidateSet);
            }

            // Go through all the destinations to see if any of them can remove GC candidates.
            for (Entry<String, StoredDestination> entry : storedDestinations.entrySet()) {
                if( gcCandidateSet.isEmpty() ) {
                    break;
                }

                // Use a visitor to cut down the number of pages that we load
                entry.getValue().locationIndex.visit(tx, new BTreeVisitor<Location, Long>() {
                    int last=-1;
                    @Override
                    public boolean isInterestedInKeysBetween(Location first, Location second) {
                        // Only descend into key ranges that could still remove
                        // a remaining GC candidate; the range endpoints
                        // themselves are covered by the neighbouring ranges.
                        if( first==null ) {
                            SortedSet<Integer> subset = gcCandidateSet.headSet(second.getDataFileId()+1);
                            if( !subset.isEmpty() && subset.last() == second.getDataFileId() ) {
                                subset.remove(second.getDataFileId());
                            }
                            return !subset.isEmpty();
                        } else if( second==null ) {
                            SortedSet<Integer> subset = gcCandidateSet.tailSet(first.getDataFileId());
                            if( !subset.isEmpty() && subset.first() == first.getDataFileId() ) {
                                subset.remove(first.getDataFileId());
                            }
                            return !subset.isEmpty();
                        } else {
                            SortedSet<Integer> subset = gcCandidateSet.subSet(first.getDataFileId(), second.getDataFileId()+1);
                            if( !subset.isEmpty() && subset.first() == first.getDataFileId() ) {
                                subset.remove(first.getDataFileId());
                            }
                            if( !subset.isEmpty() && subset.last() == second.getDataFileId() ) {
                                subset.remove(second.getDataFileId());
                            }
                            return !subset.isEmpty();
                        }
                    }

                    @Override
                    public void visit(List<Location> keys, List<Long> values) {
                        // Every file that still holds a live message is not a candidate.
                        for (Location l : keys) {
                            int fileId = l.getDataFileId();
                            if( last != fileId ) {
                                gcCandidateSet.remove(fileId);
                                last = fileId;
                            }
                        }
                    }
                });

                // Durable Subscription
                if (entry.getValue().subLocations != null) {
                    Iterator<Entry<String, Location>> iter = entry.getValue().subLocations.iterator(tx);
                    while (iter.hasNext()) {
                        Entry<String, Location> subscription = iter.next();
                        int dataFileId = subscription.getValue().getDataFileId();

                        // Move subscription along if it has no outstanding messages that need ack'd
                        // and its in the last log file in the journal.
                        if (!gcCandidateSet.isEmpty() && gcCandidateSet.first() == dataFileId) {
                            final StoredDestination destination = entry.getValue();
                            final String subscriptionKey = subscription.getKey();
                            SequenceSet pendingAcks = destination.ackPositions.get(tx, subscriptionKey);

                            // When pending is size one that is the next message Id meaning there
                            // are no pending messages currently.
                            if (pendingAcks == null || pendingAcks.size() <= 1) {
                                if (LOG.isTraceEnabled()) {
                                    LOG.trace("Found candidate for rewrite: {} from file {}", entry.getKey(), dataFileId);
                                }

                                final KahaSubscriptionCommand kahaSub =
                                    destination.subscriptions.get(tx, subscriptionKey);
                                destination.subLocations.put(
                                    tx, subscriptionKey, checkpointSubscriptionCommand(kahaSub));

                                // Skips the remove from candidates if we rewrote the subscription
                                // in order to prevent duplicate subscription commands on recover.
                                // If another subscription is on the same file and isn't rewritten
                                // than it will remove the file from the set.
                                continue;
                            }
                        }

                        gcCandidateSet.remove(dataFileId);
                    }
                }

                if (LOG.isTraceEnabled()) {
                    LOG.trace("gc candidates after dest:" + entry.getKey() + ", " + gcCandidateSet);
                }
            }

            // check we are not deleting file with ack for in-use journal files
            if (LOG.isTraceEnabled()) {
                LOG.trace("gc candidates: " + gcCandidateSet);
            }
            Iterator<Integer> candidates = gcCandidateSet.iterator();
            while (candidates.hasNext()) {
                Integer candidate = candidates.next();
                Set<Integer> referencedFileIds = metadata.ackMessageFileMap.get(candidate);
                if (referencedFileIds != null) {
                    for (Integer referencedFileId : referencedFileIds) {
                        if (completeFileSet.contains(referencedFileId) && !gcCandidateSet.contains(referencedFileId)) {
                            // active file that is not targeted for deletion is referenced so don't delete
                            candidates.remove();
                            break;
                        }
                    }
                    if (gcCandidateSet.contains(candidate)) {
                        metadata.ackMessageFileMap.remove(candidate);
                    } else {
                        if (LOG.isTraceEnabled()) {
                            LOG.trace("not removing data file: " + candidate
                                + " as contained ack(s) refer to referenced file: " + referencedFileIds);
                        }
                    }
                }
            }

            if (!gcCandidateSet.isEmpty()) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Cleanup removing the data files: " + gcCandidateSet);
                }
                journal.removeDataFiles(gcCandidateSet);
            }
        }

        LOG.debug("Checkpoint done.");
    }

    // No-op journal completion callback; lets writes skip the disk sync when
    // enableJournalDiskSyncs = false.
    final Runnable nullCompletionCallback = new Runnable() {
        @Override
        public void run() {
        }
    };

    /**
     * Serializes the producer-sequence audit to the journal if it changed
     * since the last snapshot, waiting for the write to land.
     */
    private Location checkpointProducerAudit() throws IOException {
        if (metadata.producerSequenceIdTracker == null || metadata.producerSequenceIdTracker.modified()) {
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            ObjectOutputStream oout = new ObjectOutputStream(baos);
oout.writeObject(metadata.producerSequenceIdTracker); 1720 oout.flush(); 1721 oout.close(); 1722 // using completion callback allows a disk sync to be avoided when enableJournalDiskSyncs = false 1723 Location location = store(new KahaProducerAuditCommand().setAudit(new Buffer(baos.toByteArray())), nullCompletionCallback); 1724 try { 1725 location.getLatch().await(); 1726 } catch (InterruptedException e) { 1727 throw new InterruptedIOException(e.toString()); 1728 } 1729 return location; 1730 } 1731 return metadata.producerSequenceIdTrackerLocation; 1732 } 1733 1734 private Location checkpointAckMessageFileMap() throws IOException { 1735 ByteArrayOutputStream baos = new ByteArrayOutputStream(); 1736 ObjectOutputStream oout = new ObjectOutputStream(baos); 1737 oout.writeObject(metadata.ackMessageFileMap); 1738 oout.flush(); 1739 oout.close(); 1740 // using completion callback allows a disk sync to be avoided when enableJournalDiskSyncs = false 1741 Location location = store(new KahaAckMessageFileMapCommand().setAckMessageFileMap(new Buffer(baos.toByteArray())), nullCompletionCallback); 1742 try { 1743 location.getLatch().await(); 1744 } catch (InterruptedException e) { 1745 throw new InterruptedIOException(e.toString()); 1746 } 1747 return location; 1748 } 1749 1750 private Location checkpointSubscriptionCommand(KahaSubscriptionCommand subscription) throws IOException { 1751 1752 ByteSequence sequence = toByteSequence(subscription); 1753 Location location = journal.write(sequence, nullCompletionCallback) ; 1754 1755 try { 1756 location.getLatch().await(); 1757 } catch (InterruptedException e) { 1758 throw new InterruptedIOException(e.toString()); 1759 } 1760 return location; 1761 } 1762 1763 public HashSet<Integer> getJournalFilesBeingReplicated() { 1764 return journalFilesBeingReplicated; 1765 } 1766 1767 // ///////////////////////////////////////////////////////////////// 1768 // StoredDestination related implementation methods. 
    // /////////////////////////////////////////////////////////////////

    // All StoredDestinations that have been loaded, keyed by destination key.
    protected final HashMap<String, StoredDestination> storedDestinations = new HashMap<String, StoredDestination>();

    /** Immutable pairing of a message id with its journal location. */
    static class MessageKeys {
        final String messageId;
        final Location location;

        public MessageKeys(String messageId, Location location) {
            this.messageId=messageId;
            this.location=location;
        }

        @Override
        public String toString() {
            return "["+messageId+","+location+"]";
        }
    }

    /** Marshals MessageKeys as a UTF message id followed by its location. */
    static protected class MessageKeysMarshaller extends VariableMarshaller<MessageKeys> {
        static final MessageKeysMarshaller INSTANCE = new MessageKeysMarshaller();

        @Override
        public MessageKeys readPayload(DataInput dataIn) throws IOException {
            return new MessageKeys(dataIn.readUTF(), LocationMarshaller.INSTANCE.readPayload(dataIn));
        }

        @Override
        public void writePayload(MessageKeys object, DataOutput dataOut) throws IOException {
            dataOut.writeUTF(object.messageId);
            LocationMarshaller.INSTANCE.writePayload(object.location, dataOut);
        }
    }

    /** The last acked sequence id (and its priority band) for a subscription. */
    class LastAck {
        long lastAckedSequence;
        byte priority;

        public LastAck(LastAck source) {
            this.lastAckedSequence = source.lastAckedSequence;
            this.priority = source.priority;
        }

        public LastAck() {
            this.priority = MessageOrderIndex.HI;
        }

        public LastAck(long ackLocation) {
            this.lastAckedSequence = ackLocation;
            this.priority = MessageOrderIndex.LO;
        }

        public LastAck(long ackLocation, byte priority) {
            this.lastAckedSequence = ackLocation;
            this.priority = priority;
        }

        @Override
        public String toString() {
            return "[" + lastAckedSequence + ":" + priority + "]";
        }
    }

    /** Fixed 9-byte (long sequence + priority byte) marshaller for LastAck. */
    protected class LastAckMarshaller implements Marshaller<LastAck> {

        @Override
        public void writePayload(LastAck object, DataOutput dataOut) throws IOException {
            dataOut.writeLong(object.lastAckedSequence);
            dataOut.writeByte(object.priority);
        }

        @Override
        public LastAck readPayload(DataInput dataIn) throws IOException {
            LastAck lastAcked = new LastAck();
            lastAcked.lastAckedSequence = dataIn.readLong();
            if (metadata.version >= 3) {
                // The priority byte is only present from store version 3 onward.
                lastAcked.priority = dataIn.readByte();
            }
            return lastAcked;
        }

        @Override
        public int getFixedSize() {
            return 9;
        }

        @Override
        public LastAck deepCopy(LastAck source) {
            return new LastAck(source);
        }

        @Override
        public boolean isDeepCopySupported() {
            return true;
        }
    }

    // Per-destination view over the index pages for one queue or topic.
    class StoredDestination {

        MessageOrderIndex orderIndex = new MessageOrderIndex();
        BTreeIndex<Location, Long> locationIndex;
        BTreeIndex<String, Long> messageIdIndex;

        // These bits are only set for Topics
        BTreeIndex<String, KahaSubscriptionCommand> subscriptions;
        BTreeIndex<String, LastAck> subscriptionAcks;
        HashMap<String, MessageOrderCursor> subscriptionCursors;
        ListIndex<String, SequenceSet> ackPositions;
        ListIndex<String, Location> subLocations;

        // Transient data used to track which Messages are no longer needed.
1880 final TreeMap<Long, Long> messageReferences = new TreeMap<Long, Long>(); 1881 final HashSet<String> subscriptionCache = new LinkedHashSet<String>(); 1882 1883 public void trackPendingAdd(Long seq) { 1884 orderIndex.trackPendingAdd(seq); 1885 } 1886 1887 public void trackPendingAddComplete(Long seq) { 1888 orderIndex.trackPendingAddComplete(seq); 1889 } 1890 1891 @Override 1892 public String toString() { 1893 return "nextSeq:" + orderIndex.nextMessageId + ",lastRet:" + orderIndex.cursor + ",pending:" + orderIndex.pendingAdditions.size(); 1894 } 1895 } 1896 1897 protected class StoredDestinationMarshaller extends VariableMarshaller<StoredDestination> { 1898 1899 @Override 1900 public StoredDestination readPayload(final DataInput dataIn) throws IOException { 1901 final StoredDestination value = new StoredDestination(); 1902 value.orderIndex.defaultPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, dataIn.readLong()); 1903 value.locationIndex = new BTreeIndex<Location, Long>(pageFile, dataIn.readLong()); 1904 value.messageIdIndex = new BTreeIndex<String, Long>(pageFile, dataIn.readLong()); 1905 1906 if (dataIn.readBoolean()) { 1907 value.subscriptions = new BTreeIndex<String, KahaSubscriptionCommand>(pageFile, dataIn.readLong()); 1908 value.subscriptionAcks = new BTreeIndex<String, LastAck>(pageFile, dataIn.readLong()); 1909 if (metadata.version >= 4) { 1910 value.ackPositions = new ListIndex<String, SequenceSet>(pageFile, dataIn.readLong()); 1911 } else { 1912 // upgrade 1913 pageFile.tx().execute(new Transaction.Closure<IOException>() { 1914 @Override 1915 public void execute(Transaction tx) throws IOException { 1916 LinkedHashMap<String, SequenceSet> temp = new LinkedHashMap<String, SequenceSet>(); 1917 1918 if (metadata.version >= 3) { 1919 // migrate 1920 BTreeIndex<Long, HashSet<String>> oldAckPositions = 1921 new BTreeIndex<Long, HashSet<String>>(pageFile, dataIn.readLong()); 1922 oldAckPositions.setKeyMarshaller(LongMarshaller.INSTANCE); 1923 
oldAckPositions.setValueMarshaller(HashSetStringMarshaller.INSTANCE); 1924 oldAckPositions.load(tx); 1925 1926 1927 // Do the initial build of the data in memory before writing into the store 1928 // based Ack Positions List to avoid a lot of disk thrashing. 1929 Iterator<Entry<Long, HashSet<String>>> iterator = oldAckPositions.iterator(tx); 1930 while (iterator.hasNext()) { 1931 Entry<Long, HashSet<String>> entry = iterator.next(); 1932 1933 for(String subKey : entry.getValue()) { 1934 SequenceSet pendingAcks = temp.get(subKey); 1935 if (pendingAcks == null) { 1936 pendingAcks = new SequenceSet(); 1937 temp.put(subKey, pendingAcks); 1938 } 1939 1940 pendingAcks.add(entry.getKey()); 1941 } 1942 } 1943 } 1944 // Now move the pending messages to ack data into the store backed 1945 // structure. 1946 value.ackPositions = new ListIndex<String, SequenceSet>(pageFile, tx.allocate()); 1947 value.ackPositions.setKeyMarshaller(StringMarshaller.INSTANCE); 1948 value.ackPositions.setValueMarshaller(SequenceSet.Marshaller.INSTANCE); 1949 value.ackPositions.load(tx); 1950 for(String subscriptionKey : temp.keySet()) { 1951 value.ackPositions.put(tx, subscriptionKey, temp.get(subscriptionKey)); 1952 } 1953 1954 } 1955 }); 1956 } 1957 1958 if (metadata.version >= 5) { 1959 value.subLocations = new ListIndex<String, Location>(pageFile, dataIn.readLong()); 1960 } else { 1961 // upgrade 1962 pageFile.tx().execute(new Transaction.Closure<IOException>() { 1963 @Override 1964 public void execute(Transaction tx) throws IOException { 1965 value.subLocations = new ListIndex<String, Location>(pageFile, tx.allocate()); 1966 value.subLocations.setKeyMarshaller(StringMarshaller.INSTANCE); 1967 value.subLocations.setValueMarshaller(LocationMarshaller.INSTANCE); 1968 value.subLocations.load(tx); 1969 } 1970 }); 1971 } 1972 } 1973 if (metadata.version >= 2) { 1974 value.orderIndex.lowPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, dataIn.readLong()); 1975 
value.orderIndex.highPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, dataIn.readLong()); 1976 } else { 1977 // upgrade 1978 pageFile.tx().execute(new Transaction.Closure<IOException>() { 1979 @Override 1980 public void execute(Transaction tx) throws IOException { 1981 value.orderIndex.lowPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, tx.allocate()); 1982 value.orderIndex.lowPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE); 1983 value.orderIndex.lowPriorityIndex.setValueMarshaller(MessageKeysMarshaller.INSTANCE); 1984 value.orderIndex.lowPriorityIndex.load(tx); 1985 1986 value.orderIndex.highPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, tx.allocate()); 1987 value.orderIndex.highPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE); 1988 value.orderIndex.highPriorityIndex.setValueMarshaller(MessageKeysMarshaller.INSTANCE); 1989 value.orderIndex.highPriorityIndex.load(tx); 1990 } 1991 }); 1992 } 1993 1994 return value; 1995 } 1996 1997 @Override 1998 public void writePayload(StoredDestination value, DataOutput dataOut) throws IOException { 1999 dataOut.writeLong(value.orderIndex.defaultPriorityIndex.getPageId()); 2000 dataOut.writeLong(value.locationIndex.getPageId()); 2001 dataOut.writeLong(value.messageIdIndex.getPageId()); 2002 if (value.subscriptions != null) { 2003 dataOut.writeBoolean(true); 2004 dataOut.writeLong(value.subscriptions.getPageId()); 2005 dataOut.writeLong(value.subscriptionAcks.getPageId()); 2006 dataOut.writeLong(value.ackPositions.getHeadPageId()); 2007 dataOut.writeLong(value.subLocations.getHeadPageId()); 2008 } else { 2009 dataOut.writeBoolean(false); 2010 } 2011 dataOut.writeLong(value.orderIndex.lowPriorityIndex.getPageId()); 2012 dataOut.writeLong(value.orderIndex.highPriorityIndex.getPageId()); 2013 } 2014 } 2015 2016 static class KahaSubscriptionCommandMarshaller extends VariableMarshaller<KahaSubscriptionCommand> { 2017 final static KahaSubscriptionCommandMarshaller INSTANCE = new 
KahaSubscriptionCommandMarshaller(); 2018 2019 @Override 2020 public KahaSubscriptionCommand readPayload(DataInput dataIn) throws IOException { 2021 KahaSubscriptionCommand rc = new KahaSubscriptionCommand(); 2022 rc.mergeFramed((InputStream)dataIn); 2023 return rc; 2024 } 2025 2026 @Override 2027 public void writePayload(KahaSubscriptionCommand object, DataOutput dataOut) throws IOException { 2028 object.writeFramed((OutputStream)dataOut); 2029 } 2030 } 2031 2032 protected StoredDestination getStoredDestination(KahaDestination destination, Transaction tx) throws IOException { 2033 String key = key(destination); 2034 StoredDestination rc = storedDestinations.get(key); 2035 if (rc == null) { 2036 boolean topic = destination.getType() == KahaDestination.DestinationType.TOPIC || destination.getType() == KahaDestination.DestinationType.TEMP_TOPIC; 2037 rc = loadStoredDestination(tx, key, topic); 2038 // Cache it. We may want to remove/unload destinations from the 2039 // cache that are not used for a while 2040 // to reduce memory usage. 2041 storedDestinations.put(key, rc); 2042 } 2043 return rc; 2044 } 2045 2046 protected StoredDestination getExistingStoredDestination(KahaDestination destination, Transaction tx) throws IOException { 2047 String key = key(destination); 2048 StoredDestination rc = storedDestinations.get(key); 2049 if (rc == null && metadata.destinations.containsKey(tx, key)) { 2050 rc = getStoredDestination(destination, tx); 2051 } 2052 return rc; 2053 } 2054 2055 /** 2056 * @param tx 2057 * @param key 2058 * @param topic 2059 * @return 2060 * @throws IOException 2061 */ 2062 private StoredDestination loadStoredDestination(Transaction tx, String key, boolean topic) throws IOException { 2063 // Try to load the existing indexes.. 2064 StoredDestination rc = metadata.destinations.get(tx, key); 2065 if (rc == null) { 2066 // Brand new destination.. allocate indexes for it. 
2067 rc = new StoredDestination(); 2068 rc.orderIndex.allocate(tx); 2069 rc.locationIndex = new BTreeIndex<Location, Long>(pageFile, tx.allocate()); 2070 rc.messageIdIndex = new BTreeIndex<String, Long>(pageFile, tx.allocate()); 2071 2072 if (topic) { 2073 rc.subscriptions = new BTreeIndex<String, KahaSubscriptionCommand>(pageFile, tx.allocate()); 2074 rc.subscriptionAcks = new BTreeIndex<String, LastAck>(pageFile, tx.allocate()); 2075 rc.ackPositions = new ListIndex<String, SequenceSet>(pageFile, tx.allocate()); 2076 rc.subLocations = new ListIndex<String, Location>(pageFile, tx.allocate()); 2077 } 2078 metadata.destinations.put(tx, key, rc); 2079 } 2080 2081 // Configure the marshalers and load. 2082 rc.orderIndex.load(tx); 2083 2084 // Figure out the next key using the last entry in the destination. 2085 rc.orderIndex.configureLast(tx); 2086 2087 rc.locationIndex.setKeyMarshaller(org.apache.activemq.store.kahadb.disk.util.LocationMarshaller.INSTANCE); 2088 rc.locationIndex.setValueMarshaller(LongMarshaller.INSTANCE); 2089 rc.locationIndex.load(tx); 2090 2091 rc.messageIdIndex.setKeyMarshaller(StringMarshaller.INSTANCE); 2092 rc.messageIdIndex.setValueMarshaller(LongMarshaller.INSTANCE); 2093 rc.messageIdIndex.load(tx); 2094 2095 // If it was a topic... 
2096 if (topic) { 2097 2098 rc.subscriptions.setKeyMarshaller(StringMarshaller.INSTANCE); 2099 rc.subscriptions.setValueMarshaller(KahaSubscriptionCommandMarshaller.INSTANCE); 2100 rc.subscriptions.load(tx); 2101 2102 rc.subscriptionAcks.setKeyMarshaller(StringMarshaller.INSTANCE); 2103 rc.subscriptionAcks.setValueMarshaller(new LastAckMarshaller()); 2104 rc.subscriptionAcks.load(tx); 2105 2106 rc.ackPositions.setKeyMarshaller(StringMarshaller.INSTANCE); 2107 rc.ackPositions.setValueMarshaller(SequenceSet.Marshaller.INSTANCE); 2108 rc.ackPositions.load(tx); 2109 2110 rc.subLocations.setKeyMarshaller(StringMarshaller.INSTANCE); 2111 rc.subLocations.setValueMarshaller(LocationMarshaller.INSTANCE); 2112 rc.subLocations.load(tx); 2113 2114 rc.subscriptionCursors = new HashMap<String, MessageOrderCursor>(); 2115 2116 if (metadata.version < 3) { 2117 2118 // on upgrade need to fill ackLocation with available messages past last ack 2119 for (Iterator<Entry<String, LastAck>> iterator = rc.subscriptionAcks.iterator(tx); iterator.hasNext(); ) { 2120 Entry<String, LastAck> entry = iterator.next(); 2121 for (Iterator<Entry<Long, MessageKeys>> orderIterator = 2122 rc.orderIndex.iterator(tx, new MessageOrderCursor(entry.getValue().lastAckedSequence)); orderIterator.hasNext(); ) { 2123 Long sequence = orderIterator.next().getKey(); 2124 addAckLocation(tx, rc, sequence, entry.getKey()); 2125 } 2126 // modify so it is upgraded 2127 rc.subscriptionAcks.put(tx, entry.getKey(), entry.getValue()); 2128 } 2129 } 2130 2131 // Configure the message references index 2132 Iterator<Entry<String, SequenceSet>> subscriptions = rc.ackPositions.iterator(tx); 2133 while (subscriptions.hasNext()) { 2134 Entry<String, SequenceSet> subscription = subscriptions.next(); 2135 SequenceSet pendingAcks = subscription.getValue(); 2136 if (pendingAcks != null && !pendingAcks.isEmpty()) { 2137 Long lastPendingAck = pendingAcks.getTail().getLast(); 2138 for(Long sequenceId : pendingAcks) { 2139 Long current = 
rc.messageReferences.get(sequenceId); 2140 if (current == null) { 2141 current = new Long(0); 2142 } 2143 2144 // We always add a trailing empty entry for the next position to start from 2145 // so we need to ensure we don't count that as a message reference on reload. 2146 if (!sequenceId.equals(lastPendingAck)) { 2147 current = current.longValue() + 1; 2148 } 2149 2150 rc.messageReferences.put(sequenceId, current); 2151 } 2152 } 2153 } 2154 2155 // Configure the subscription cache 2156 for (Iterator<Entry<String, LastAck>> iterator = rc.subscriptionAcks.iterator(tx); iterator.hasNext(); ) { 2157 Entry<String, LastAck> entry = iterator.next(); 2158 rc.subscriptionCache.add(entry.getKey()); 2159 } 2160 2161 if (rc.orderIndex.nextMessageId == 0) { 2162 // check for existing durable sub all acked out - pull next seq from acks as messages are gone 2163 if (!rc.subscriptionAcks.isEmpty(tx)) { 2164 for (Iterator<Entry<String, LastAck>> iterator = rc.subscriptionAcks.iterator(tx); iterator.hasNext();) { 2165 Entry<String, LastAck> entry = iterator.next(); 2166 rc.orderIndex.nextMessageId = 2167 Math.max(rc.orderIndex.nextMessageId, entry.getValue().lastAckedSequence +1); 2168 } 2169 } 2170 } else { 2171 // update based on ackPositions for unmatched, last entry is always the next 2172 if (!rc.messageReferences.isEmpty()) { 2173 Long nextMessageId = (Long) rc.messageReferences.keySet().toArray()[rc.messageReferences.size() - 1]; 2174 rc.orderIndex.nextMessageId = 2175 Math.max(rc.orderIndex.nextMessageId, nextMessageId); 2176 } 2177 } 2178 } 2179 2180 if (metadata.version < VERSION) { 2181 // store again after upgrade 2182 metadata.destinations.put(tx, key, rc); 2183 } 2184 return rc; 2185 } 2186 2187 private void addAckLocation(Transaction tx, StoredDestination sd, Long messageSequence, String subscriptionKey) throws IOException { 2188 SequenceSet sequences = sd.ackPositions.get(tx, subscriptionKey); 2189 if (sequences == null) { 2190 sequences = new SequenceSet(); 2191 
sequences.add(messageSequence); 2192 sd.ackPositions.add(tx, subscriptionKey, sequences); 2193 } else { 2194 sequences.add(messageSequence); 2195 sd.ackPositions.put(tx, subscriptionKey, sequences); 2196 } 2197 2198 Long count = sd.messageReferences.get(messageSequence); 2199 if (count == null) { 2200 count = Long.valueOf(0L); 2201 } 2202 count = count.longValue() + 1; 2203 sd.messageReferences.put(messageSequence, count); 2204 } 2205 2206 // new sub is interested in potentially all existing messages 2207 private void addAckLocationForRetroactiveSub(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException { 2208 SequenceSet allOutstanding = new SequenceSet(); 2209 Iterator<Map.Entry<String, SequenceSet>> iterator = sd.ackPositions.iterator(tx); 2210 while (iterator.hasNext()) { 2211 SequenceSet set = iterator.next().getValue(); 2212 for (Long entry : set) { 2213 allOutstanding.add(entry); 2214 } 2215 } 2216 sd.ackPositions.put(tx, subscriptionKey, allOutstanding); 2217 2218 for (Long ackPosition : allOutstanding) { 2219 Long count = sd.messageReferences.get(ackPosition); 2220 count = count.longValue() + 1; 2221 sd.messageReferences.put(ackPosition, count); 2222 } 2223 } 2224 2225 // on a new message add, all existing subs are interested in this message 2226 private void addAckLocationForNewMessage(Transaction tx, StoredDestination sd, Long messageSequence) throws IOException { 2227 for(String subscriptionKey : sd.subscriptionCache) { 2228 SequenceSet sequences = sd.ackPositions.get(tx, subscriptionKey); 2229 if (sequences == null) { 2230 sequences = new SequenceSet(); 2231 sequences.add(new Sequence(messageSequence, messageSequence + 1)); 2232 sd.ackPositions.add(tx, subscriptionKey, sequences); 2233 } else { 2234 sequences.add(new Sequence(messageSequence, messageSequence + 1)); 2235 sd.ackPositions.put(tx, subscriptionKey, sequences); 2236 } 2237 2238 Long count = sd.messageReferences.get(messageSequence); 2239 if (count == null) { 2240 
count = Long.valueOf(0L); 2241 } 2242 count = count.longValue() + 1; 2243 sd.messageReferences.put(messageSequence, count); 2244 sd.messageReferences.put(messageSequence+1, Long.valueOf(0L)); 2245 } 2246 } 2247 2248 private void removeAckLocationsForSub(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException { 2249 if (!sd.ackPositions.isEmpty(tx)) { 2250 SequenceSet sequences = sd.ackPositions.remove(tx, subscriptionKey); 2251 if (sequences == null || sequences.isEmpty()) { 2252 return; 2253 } 2254 2255 ArrayList<Long> unreferenced = new ArrayList<Long>(); 2256 2257 for(Long sequenceId : sequences) { 2258 Long references = sd.messageReferences.get(sequenceId); 2259 if (references != null) { 2260 references = references.longValue() - 1; 2261 2262 if (references.longValue() > 0) { 2263 sd.messageReferences.put(sequenceId, references); 2264 } else { 2265 sd.messageReferences.remove(sequenceId); 2266 unreferenced.add(sequenceId); 2267 } 2268 } 2269 } 2270 2271 for(Long sequenceId : unreferenced) { 2272 // Find all the entries that need to get deleted. 2273 ArrayList<Entry<Long, MessageKeys>> deletes = new ArrayList<Entry<Long, MessageKeys>>(); 2274 sd.orderIndex.getDeleteList(tx, deletes, sequenceId); 2275 2276 // Do the actual deletes. 2277 for (Entry<Long, MessageKeys> entry : deletes) { 2278 sd.locationIndex.remove(tx, entry.getValue().location); 2279 sd.messageIdIndex.remove(tx, entry.getValue().messageId); 2280 sd.orderIndex.remove(tx, entry.getKey()); 2281 } 2282 } 2283 } 2284 } 2285 2286 /** 2287 * @param tx 2288 * @param sd 2289 * @param subscriptionKey 2290 * @param messageSequence 2291 * @throws IOException 2292 */ 2293 private void removeAckLocation(Transaction tx, StoredDestination sd, String subscriptionKey, Long messageSequence) throws IOException { 2294 // Remove the sub from the previous location set.. 
2295 if (messageSequence != null) { 2296 SequenceSet range = sd.ackPositions.get(tx, subscriptionKey); 2297 if (range != null && !range.isEmpty()) { 2298 range.remove(messageSequence); 2299 if (!range.isEmpty()) { 2300 sd.ackPositions.put(tx, subscriptionKey, range); 2301 } else { 2302 sd.ackPositions.remove(tx, subscriptionKey); 2303 } 2304 2305 // Check if the message is reference by any other subscription. 2306 Long count = sd.messageReferences.get(messageSequence); 2307 if (count != null){ 2308 long references = count.longValue() - 1; 2309 if (references > 0) { 2310 sd.messageReferences.put(messageSequence, Long.valueOf(references)); 2311 return; 2312 } else { 2313 sd.messageReferences.remove(messageSequence); 2314 } 2315 } 2316 2317 // Find all the entries that need to get deleted. 2318 ArrayList<Entry<Long, MessageKeys>> deletes = new ArrayList<Entry<Long, MessageKeys>>(); 2319 sd.orderIndex.getDeleteList(tx, deletes, messageSequence); 2320 2321 // Do the actual deletes. 2322 for (Entry<Long, MessageKeys> entry : deletes) { 2323 sd.locationIndex.remove(tx, entry.getValue().location); 2324 sd.messageIdIndex.remove(tx, entry.getValue().messageId); 2325 sd.orderIndex.remove(tx, entry.getKey()); 2326 } 2327 } 2328 } 2329 } 2330 2331 public LastAck getLastAck(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException { 2332 return sd.subscriptionAcks.get(tx, subscriptionKey); 2333 } 2334 2335 public long getStoredMessageCount(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException { 2336 SequenceSet messageSequences = sd.ackPositions.get(tx, subscriptionKey); 2337 if (messageSequences != null) { 2338 long result = messageSequences.rangeSize(); 2339 // if there's anything in the range the last value is always the nextMessage marker, so remove 1. 2340 return result > 0 ? 
result - 1 : 0; 2341 } 2342 2343 return 0; 2344 } 2345 2346 protected String key(KahaDestination destination) { 2347 return destination.getType().getNumber() + ":" + destination.getName(); 2348 } 2349 2350 // ///////////////////////////////////////////////////////////////// 2351 // Transaction related implementation methods. 2352 // ///////////////////////////////////////////////////////////////// 2353 @SuppressWarnings("rawtypes") 2354 private final LinkedHashMap<TransactionId, List<Operation>> inflightTransactions = new LinkedHashMap<TransactionId, List<Operation>>(); 2355 @SuppressWarnings("rawtypes") 2356 protected final LinkedHashMap<TransactionId, List<Operation>> preparedTransactions = new LinkedHashMap<TransactionId, List<Operation>>(); 2357 protected final Set<String> ackedAndPrepared = new HashSet<String>(); 2358 protected final Set<String> rolledBackAcks = new HashSet<String>(); 2359 2360 // messages that have prepared (pending) acks cannot be re-dispatched unless the outcome is rollback, 2361 // till then they are skipped by the store. 
// 'at most once' XA guarantee
    /**
     * Called on recovery: remembers message ids with prepared (pending) acks so
     * the store skips them on dispatch until the XA outcome is known.
     * Takes the index write lock because the sets are read under the index lock.
     */
    public void trackRecoveredAcks(ArrayList<MessageAck> acks) {
        this.indexLock.writeLock().lock();
        try {
            for (MessageAck ack : acks) {
                ackedAndPrepared.add(ack.getLastMessageId().toProducerKey());
            }
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /**
     * Called once an XA outcome is decided: stops skipping the messages. On
     * rollback the ids are additionally remembered in rolledBackAcks so they
     * can be re-dispatched.
     */
    public void forgetRecoveredAcks(ArrayList<MessageAck> acks, boolean rollback) throws IOException {
        if (acks != null) {
            this.indexLock.writeLock().lock();
            try {
                for (MessageAck ack : acks) {
                    final String id = ack.getLastMessageId().toProducerKey();
                    ackedAndPrepared.remove(id);
                    if (rollback) {
                        rolledBackAcks.add(id);
                    }
                }
            } finally {
                this.indexLock.writeLock().unlock();
            }
        }
    }

    /**
     * Returns the (lazily created) operation list for an in-flight transaction.
     * The returned list is synchronized; the lookup itself is guarded by the
     * inflightTransactions monitor.
     */
    @SuppressWarnings("rawtypes")
    private List<Operation> getInflightTx(KahaTransactionInfo info) {
        TransactionId key = TransactionIdConversion.convert(info);
        List<Operation> tx;
        synchronized (inflightTransactions) {
            tx = inflightTransactions.get(key);
            if (tx == null) {
                tx = Collections.synchronizedList(new ArrayList<Operation>());
                inflightTransactions.put(key, tx);
            }
        }
        return tx;
    }

    // Kept for API symmetry with key(KahaDestination); currently unused.
    @SuppressWarnings("unused")
    private TransactionId key(KahaTransactionInfo transactionInfo) {
        return TransactionIdConversion.convert(transactionInfo);
    }

    /**
     * A journaled command captured inside a transaction; execute() replays it
     * against the index when the transaction commits.
     */
    abstract class Operation <T extends JournalCommand<T>> {
        final T command;
        final Location location;

        public Operation(T command, Location location) {
            this.command = command;
            this.location = location;
        }

        public Location getLocation() {
            return location;
        }

        public T getCommand() {
            return command;
        }

        abstract public void execute(Transaction tx) throws IOException;
    }

    /** Transactional message add; notifies the IndexAware callback (if any)
     *  with the assigned sequence while the index lock is held. */
    class AddOperation extends Operation<KahaAddMessageCommand> {
        final IndexAware runWithIndexLock;

        public AddOperation(KahaAddMessageCommand command, Location location, IndexAware runWithIndexLock) {
            super(command, location);
            this.runWithIndexLock = runWithIndexLock;
        }

        @Override
        public void execute(Transaction tx) throws IOException {
            long seq = updateIndex(tx, command, location);
            if (runWithIndexLock != null) {
                runWithIndexLock.sequenceAssignedWithIndexLocked(seq);
            }
        }

    }

    /** Transactional message remove (ack). */
    class RemoveOperation extends Operation<KahaRemoveMessageCommand> {

        public RemoveOperation(KahaRemoveMessageCommand command, Location location) {
            super(command, location);
        }

        @Override
        public void execute(Transaction tx) throws IOException {
            updateIndex(tx, command, location);
        }
    }

    // /////////////////////////////////////////////////////////////////
    // Initialization related implementation methods.
    // /////////////////////////////////////////////////////////////////

    /**
     * Builds the index PageFile under indexDirectory (defaulting to the journal
     * directory), applying all configured index tuning options.
     */
    private PageFile createPageFile() throws IOException {
        if( indexDirectory == null ) {
            indexDirectory = directory;
        }
        IOHelper.mkdirs(indexDirectory);
        PageFile index = new PageFile(indexDirectory, "db");
        index.setEnableWriteThread(isEnableIndexWriteAsync());
        index.setWriteBatchSize(getIndexWriteBatchSize());
        index.setPageCacheSize(indexCacheSize);
        index.setUseLFRUEviction(isUseIndexLFRUEviction());
        index.setLFUEvictionFactor(getIndexLFUEvictionFactor());
        index.setEnableDiskSyncs(isEnableIndexDiskSyncs());
        index.setEnableRecoveryFile(isEnableIndexRecoveryFile());
        index.setEnablePageCaching(isEnableIndexPageCaching());
        return index;
    }

    /**
     * Builds the journal from the configured options. Note checksums are
     * forced on whenever corruption checking is enabled, since the check
     * depends on them.
     */
    private Journal createJournal() throws IOException {
        Journal manager = new Journal();
        manager.setDirectory(directory);
        manager.setMaxFileLength(getJournalMaxFileLength());
        manager.setCheckForCorruptionOnStartup(checkForCorruptJournalFiles);
        manager.setChecksum(checksumJournalFiles || checkForCorruptJournalFiles);
        manager.setWriteBatchSize(getJournalMaxWriteBatchSize());
        manager.setArchiveDataLogs(isArchiveDataLogs());
        manager.setSizeAccumulator(journalSize);
        manager.setEnableAsyncDiskSync(isEnableJournalDiskSyncs());
        if (getDirectoryArchive() != null) {
            IOHelper.mkdirs(getDirectoryArchive());
            manager.setDirectoryArchive(getDirectoryArchive());
        }
        return manager;
    }

    /** Fresh Metadata seeded with the producer-audit configuration. */
    private Metadata createMetadata() {
        Metadata md = new Metadata();
        md.producerSequenceIdTracker.setAuditDepth(getFailoverProducersAuditDepth());
        md.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(getMaxFailoverProducersToTrack());
        return md;
    }

    // ----- configuration accessors ------------------------------------------

    public int getJournalMaxWriteBatchSize() {
        return journalMaxWriteBatchSize;
    }

    public void setJournalMaxWriteBatchSize(int journalMaxWriteBatchSize) {
        this.journalMaxWriteBatchSize = journalMaxWriteBatchSize;
    }

    public File getDirectory() {
        return directory;
    }

    public void setDirectory(File directory) {
        this.directory = directory;
    }

    public boolean isDeleteAllMessages() {
        return deleteAllMessages;
    }

    public void setDeleteAllMessages(boolean deleteAllMessages) {
        this.deleteAllMessages = deleteAllMessages;
    }

    // NOTE: the backing field is (confusingly) named setIndexWriteBatchSize.
    public void setIndexWriteBatchSize(int setIndexWriteBatchSize) {
        this.setIndexWriteBatchSize = setIndexWriteBatchSize;
    }

    public int getIndexWriteBatchSize() {
        return setIndexWriteBatchSize;
    }

    public void setEnableIndexWriteAsync(boolean enableIndexWriteAsync) {
        this.enableIndexWriteAsync = enableIndexWriteAsync;
    }

    boolean isEnableIndexWriteAsync() {
        return enableIndexWriteAsync;
    }

    public boolean isEnableJournalDiskSyncs() {
        return enableJournalDiskSyncs;
    }

    public void setEnableJournalDiskSyncs(boolean syncWrites) {
        this.enableJournalDiskSyncs = syncWrites;
    }

    public long getCheckpointInterval() {
        return checkpointInterval;
    }

    public void setCheckpointInterval(long checkpointInterval) {
        this.checkpointInterval = checkpointInterval;
    }

    public long getCleanupInterval() {
        return cleanupInterval;
    }

    public void setCleanupInterval(long cleanupInterval) {
        this.cleanupInterval = cleanupInterval;
    }

    public void setJournalMaxFileLength(int journalMaxFileLength) {
        this.journalMaxFileLength = journalMaxFileLength;
    }

    public int getJournalMaxFileLength() {
        return journalMaxFileLength;
    }

    // Producer-audit settings delegate straight to the metadata tracker.
    public void setMaxFailoverProducersToTrack(int maxFailoverProducersToTrack) {
        this.metadata.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(maxFailoverProducersToTrack);
    }

    public int getMaxFailoverProducersToTrack() {
        return this.metadata.producerSequenceIdTracker.getMaximumNumberOfProducersToTrack();
    }

    public void setFailoverProducersAuditDepth(int failoverProducersAuditDepth) {
        this.metadata.producerSequenceIdTracker.setAuditDepth(failoverProducersAuditDepth);
    }

    public int getFailoverProducersAuditDepth() {
        return this.metadata.producerSequenceIdTracker.getAuditDepth();
    }

    // Lazy accessors: created on first use from the current configuration.
    public PageFile getPageFile() throws IOException {
        if (pageFile == null) {
            pageFile = createPageFile();
        }
        return pageFile;
    }

    public Journal getJournal() throws IOException {
        if (journal == null) {
            journal = createJournal();
        }
        return journal;
    }

    public boolean isFailIfDatabaseIsLocked() {
        return failIfDatabaseIsLocked;
    }

    public void setFailIfDatabaseIsLocked(boolean failIfDatabaseIsLocked) {
        this.failIfDatabaseIsLocked = failIfDatabaseIsLocked;
    }

    public boolean isIgnoreMissingJournalfiles() {
        return ignoreMissingJournalfiles;
    }

    public void setIgnoreMissingJournalfiles(boolean ignoreMissingJournalfiles) {
        this.ignoreMissingJournalfiles = ignoreMissingJournalfiles;
    }

    public int getIndexCacheSize() {
        return indexCacheSize;
    }

    public void setIndexCacheSize(int indexCacheSize) {
        this.indexCacheSize = indexCacheSize;
    }

    public boolean isCheckForCorruptJournalFiles() {
        return checkForCorruptJournalFiles;
    }

    public void setCheckForCorruptJournalFiles(boolean checkForCorruptJournalFiles) {
        this.checkForCorruptJournalFiles = checkForCorruptJournalFiles;
    }

    public boolean isChecksumJournalFiles() {
        return checksumJournalFiles;
    }

    public void setChecksumJournalFiles(boolean checksumJournalFiles) {
        this.checksumJournalFiles = checksumJournalFiles;
    }

    @Override
    public void setBrokerService(BrokerService brokerService) {
        this.brokerService = brokerService;
    }

    /**
     * @return the archiveDataLogs
     */
    public boolean isArchiveDataLogs() {
        return this.archiveDataLogs;
    }

    /**
     * @param archiveDataLogs the archiveDataLogs to set
     */
    public void setArchiveDataLogs(boolean archiveDataLogs) {
        this.archiveDataLogs = archiveDataLogs;
    }

    /**
     * @return the directoryArchive
     */
    public File getDirectoryArchive() {
        return this.directoryArchive;
    }

    /**
     * @param directoryArchive the directoryArchive to set
     */
    public void setDirectoryArchive(File directoryArchive) {
        this.directoryArchive = directoryArchive;
    }

    public boolean isArchiveCorruptedIndex() {
        return archiveCorruptedIndex;
    }

    public void setArchiveCorruptedIndex(boolean archiveCorruptedIndex) {
        this.archiveCorruptedIndex = archiveCorruptedIndex;
    }

    public float getIndexLFUEvictionFactor() {
        return indexLFUEvictionFactor;
    }

    public void setIndexLFUEvictionFactor(float indexLFUEvictionFactor) {
        this.indexLFUEvictionFactor = indexLFUEvictionFactor;
    }

    public boolean isUseIndexLFRUEviction() {
        return useIndexLFRUEviction;
    }

    public void setUseIndexLFRUEviction(boolean useIndexLFRUEviction) {
        this.useIndexLFRUEviction = useIndexLFRUEviction;
    }

    public void setEnableIndexDiskSyncs(boolean enableIndexDiskSyncs) {
        this.enableIndexDiskSyncs = enableIndexDiskSyncs;
    }

    public void setEnableIndexRecoveryFile(boolean enableIndexRecoveryFile) {
        this.enableIndexRecoveryFile = enableIndexRecoveryFile;
    }

    public void setEnableIndexPageCaching(boolean enableIndexPageCaching) {
        this.enableIndexPageCaching = enableIndexPageCaching;
    }

    public boolean isEnableIndexDiskSyncs() {
        return enableIndexDiskSyncs;
    }

    public boolean isEnableIndexRecoveryFile() {
        return enableIndexRecoveryFile;
    }

    public boolean isEnableIndexPageCaching() {
        return enableIndexPageCaching;
    }

    // /////////////////////////////////////////////////////////////////
    // Internal conversion methods.
2729 // ///////////////////////////////////////////////////////////////// 2730 2731 class MessageOrderCursor{ 2732 long defaultCursorPosition; 2733 long lowPriorityCursorPosition; 2734 long highPriorityCursorPosition; 2735 MessageOrderCursor(){ 2736 } 2737 2738 MessageOrderCursor(long position){ 2739 this.defaultCursorPosition=position; 2740 this.lowPriorityCursorPosition=position; 2741 this.highPriorityCursorPosition=position; 2742 } 2743 2744 MessageOrderCursor(MessageOrderCursor other){ 2745 this.defaultCursorPosition=other.defaultCursorPosition; 2746 this.lowPriorityCursorPosition=other.lowPriorityCursorPosition; 2747 this.highPriorityCursorPosition=other.highPriorityCursorPosition; 2748 } 2749 2750 MessageOrderCursor copy() { 2751 return new MessageOrderCursor(this); 2752 } 2753 2754 void reset() { 2755 this.defaultCursorPosition=0; 2756 this.highPriorityCursorPosition=0; 2757 this.lowPriorityCursorPosition=0; 2758 } 2759 2760 void increment() { 2761 if (defaultCursorPosition!=0) { 2762 defaultCursorPosition++; 2763 } 2764 if (highPriorityCursorPosition!=0) { 2765 highPriorityCursorPosition++; 2766 } 2767 if (lowPriorityCursorPosition!=0) { 2768 lowPriorityCursorPosition++; 2769 } 2770 } 2771 2772 @Override 2773 public String toString() { 2774 return "MessageOrderCursor:[def:" + defaultCursorPosition 2775 + ", low:" + lowPriorityCursorPosition 2776 + ", high:" + highPriorityCursorPosition + "]"; 2777 } 2778 2779 public void sync(MessageOrderCursor other) { 2780 this.defaultCursorPosition=other.defaultCursorPosition; 2781 this.lowPriorityCursorPosition=other.lowPriorityCursorPosition; 2782 this.highPriorityCursorPosition=other.highPriorityCursorPosition; 2783 } 2784 } 2785 2786 class MessageOrderIndex { 2787 static final byte HI = 9; 2788 static final byte LO = 0; 2789 static final byte DEF = 4; 2790 2791 long nextMessageId; 2792 BTreeIndex<Long, MessageKeys> defaultPriorityIndex; 2793 BTreeIndex<Long, MessageKeys> lowPriorityIndex; 2794 BTreeIndex<Long, 
MessageKeys> highPriorityIndex; 2795 final MessageOrderCursor cursor = new MessageOrderCursor(); 2796 Long lastDefaultKey; 2797 Long lastHighKey; 2798 Long lastLowKey; 2799 byte lastGetPriority; 2800 final List<Long> pendingAdditions = new LinkedList<Long>(); 2801 2802 MessageKeys remove(Transaction tx, Long key) throws IOException { 2803 MessageKeys result = defaultPriorityIndex.remove(tx, key); 2804 if (result == null && highPriorityIndex!=null) { 2805 result = highPriorityIndex.remove(tx, key); 2806 if (result ==null && lowPriorityIndex!=null) { 2807 result = lowPriorityIndex.remove(tx, key); 2808 } 2809 } 2810 return result; 2811 } 2812 2813 void load(Transaction tx) throws IOException { 2814 defaultPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE); 2815 defaultPriorityIndex.setValueMarshaller(MessageKeysMarshaller.INSTANCE); 2816 defaultPriorityIndex.load(tx); 2817 lowPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE); 2818 lowPriorityIndex.setValueMarshaller(MessageKeysMarshaller.INSTANCE); 2819 lowPriorityIndex.load(tx); 2820 highPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE); 2821 highPriorityIndex.setValueMarshaller(MessageKeysMarshaller.INSTANCE); 2822 highPriorityIndex.load(tx); 2823 } 2824 2825 void allocate(Transaction tx) throws IOException { 2826 defaultPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, tx.allocate()); 2827 if (metadata.version >= 2) { 2828 lowPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, tx.allocate()); 2829 highPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, tx.allocate()); 2830 } 2831 } 2832 2833 void configureLast(Transaction tx) throws IOException { 2834 // Figure out the next key using the last entry in the destination. 
2835 if (highPriorityIndex != null) { 2836 Entry<Long, MessageKeys> lastEntry = highPriorityIndex.getLast(tx); 2837 if (lastEntry != null) { 2838 nextMessageId = lastEntry.getKey() + 1; 2839 } else { 2840 lastEntry = defaultPriorityIndex.getLast(tx); 2841 if (lastEntry != null) { 2842 nextMessageId = lastEntry.getKey() + 1; 2843 } else { 2844 lastEntry = lowPriorityIndex.getLast(tx); 2845 if (lastEntry != null) { 2846 nextMessageId = lastEntry.getKey() + 1; 2847 } 2848 } 2849 } 2850 } else { 2851 Entry<Long, MessageKeys> lastEntry = defaultPriorityIndex.getLast(tx); 2852 if (lastEntry != null) { 2853 nextMessageId = lastEntry.getKey() + 1; 2854 } 2855 } 2856 } 2857 2858 void clear(Transaction tx) throws IOException { 2859 this.remove(tx); 2860 this.resetCursorPosition(); 2861 this.allocate(tx); 2862 this.load(tx); 2863 this.configureLast(tx); 2864 } 2865 2866 void remove(Transaction tx) throws IOException { 2867 defaultPriorityIndex.clear(tx); 2868 defaultPriorityIndex.unload(tx); 2869 tx.free(defaultPriorityIndex.getPageId()); 2870 if (lowPriorityIndex != null) { 2871 lowPriorityIndex.clear(tx); 2872 lowPriorityIndex.unload(tx); 2873 2874 tx.free(lowPriorityIndex.getPageId()); 2875 } 2876 if (highPriorityIndex != null) { 2877 highPriorityIndex.clear(tx); 2878 highPriorityIndex.unload(tx); 2879 tx.free(highPriorityIndex.getPageId()); 2880 } 2881 } 2882 2883 void resetCursorPosition() { 2884 this.cursor.reset(); 2885 lastDefaultKey = null; 2886 lastHighKey = null; 2887 lastLowKey = null; 2888 } 2889 2890 void setBatch(Transaction tx, Long sequence) throws IOException { 2891 if (sequence != null) { 2892 Long nextPosition = new Long(sequence.longValue() + 1); 2893 if (defaultPriorityIndex.containsKey(tx, sequence)) { 2894 lastDefaultKey = sequence; 2895 cursor.defaultCursorPosition = nextPosition.longValue(); 2896 } else if (highPriorityIndex != null && highPriorityIndex.containsKey(tx, sequence)) { 2897 lastHighKey = sequence; 2898 cursor.highPriorityCursorPosition = 
nextPosition.longValue(); 2899 } else if (lowPriorityIndex.containsKey(tx, sequence)) { 2900 lastLowKey = sequence; 2901 cursor.lowPriorityCursorPosition = nextPosition.longValue(); 2902 } else { 2903 lastDefaultKey = sequence; 2904 cursor.defaultCursorPosition = nextPosition.longValue(); 2905 } 2906 } 2907 } 2908 2909 void setBatch(Transaction tx, LastAck last) throws IOException { 2910 setBatch(tx, last.lastAckedSequence); 2911 if (cursor.defaultCursorPosition == 0 2912 && cursor.highPriorityCursorPosition == 0 2913 && cursor.lowPriorityCursorPosition == 0) { 2914 long next = last.lastAckedSequence + 1; 2915 switch (last.priority) { 2916 case DEF: 2917 cursor.defaultCursorPosition = next; 2918 cursor.highPriorityCursorPosition = next; 2919 break; 2920 case HI: 2921 cursor.highPriorityCursorPosition = next; 2922 break; 2923 case LO: 2924 cursor.lowPriorityCursorPosition = next; 2925 cursor.defaultCursorPosition = next; 2926 cursor.highPriorityCursorPosition = next; 2927 break; 2928 } 2929 } 2930 } 2931 2932 void stoppedIterating() { 2933 if (lastDefaultKey!=null) { 2934 cursor.defaultCursorPosition=lastDefaultKey.longValue()+1; 2935 } 2936 if (lastHighKey!=null) { 2937 cursor.highPriorityCursorPosition=lastHighKey.longValue()+1; 2938 } 2939 if (lastLowKey!=null) { 2940 cursor.lowPriorityCursorPosition=lastLowKey.longValue()+1; 2941 } 2942 lastDefaultKey = null; 2943 lastHighKey = null; 2944 lastLowKey = null; 2945 } 2946 2947 void getDeleteList(Transaction tx, ArrayList<Entry<Long, MessageKeys>> deletes, Long sequenceId) 2948 throws IOException { 2949 if (defaultPriorityIndex.containsKey(tx, sequenceId)) { 2950 getDeleteList(tx, deletes, defaultPriorityIndex, sequenceId); 2951 } else if (highPriorityIndex != null && highPriorityIndex.containsKey(tx, sequenceId)) { 2952 getDeleteList(tx, deletes, highPriorityIndex, sequenceId); 2953 } else if (lowPriorityIndex != null && lowPriorityIndex.containsKey(tx, sequenceId)) { 2954 getDeleteList(tx, deletes, 
lowPriorityIndex, sequenceId); 2955 } 2956 } 2957 2958 void getDeleteList(Transaction tx, ArrayList<Entry<Long, MessageKeys>> deletes, 2959 BTreeIndex<Long, MessageKeys> index, Long sequenceId) throws IOException { 2960 2961 Iterator<Entry<Long, MessageKeys>> iterator = index.iterator(tx, sequenceId, null); 2962 deletes.add(iterator.next()); 2963 } 2964 2965 long getNextMessageId(int priority) { 2966 return nextMessageId++; 2967 } 2968 2969 MessageKeys get(Transaction tx, Long key) throws IOException { 2970 MessageKeys result = defaultPriorityIndex.get(tx, key); 2971 if (result == null) { 2972 result = highPriorityIndex.get(tx, key); 2973 if (result == null) { 2974 result = lowPriorityIndex.get(tx, key); 2975 lastGetPriority = LO; 2976 } else { 2977 lastGetPriority = HI; 2978 } 2979 } else { 2980 lastGetPriority = DEF; 2981 } 2982 return result; 2983 } 2984 2985 MessageKeys put(Transaction tx, int priority, Long key, MessageKeys value) throws IOException { 2986 if (priority == javax.jms.Message.DEFAULT_PRIORITY) { 2987 return defaultPriorityIndex.put(tx, key, value); 2988 } else if (priority > javax.jms.Message.DEFAULT_PRIORITY) { 2989 return highPriorityIndex.put(tx, key, value); 2990 } else { 2991 return lowPriorityIndex.put(tx, key, value); 2992 } 2993 } 2994 2995 Iterator<Entry<Long, MessageKeys>> iterator(Transaction tx) throws IOException{ 2996 return new MessageOrderIterator(tx,cursor,this); 2997 } 2998 2999 Iterator<Entry<Long, MessageKeys>> iterator(Transaction tx, MessageOrderCursor m) throws IOException{ 3000 return new MessageOrderIterator(tx,m,this); 3001 } 3002 3003 public byte lastGetPriority() { 3004 return lastGetPriority; 3005 } 3006 3007 public boolean alreadyDispatched(Long sequence) { 3008 return (cursor.highPriorityCursorPosition > 0 && cursor.highPriorityCursorPosition >= sequence) || 3009 (cursor.defaultCursorPosition > 0 && cursor.defaultCursorPosition >= sequence) || 3010 (cursor.lowPriorityCursorPosition > 0 && 
cursor.lowPriorityCursorPosition >= sequence); 3011 } 3012 3013 public void trackPendingAdd(Long seq) { 3014 synchronized (pendingAdditions) { 3015 pendingAdditions.add(seq); 3016 } 3017 } 3018 3019 public void trackPendingAddComplete(Long seq) { 3020 synchronized (pendingAdditions) { 3021 pendingAdditions.remove(seq); 3022 } 3023 } 3024 3025 public Long minPendingAdd() { 3026 synchronized (pendingAdditions) { 3027 if (!pendingAdditions.isEmpty()) { 3028 return pendingAdditions.get(0); 3029 } else { 3030 return null; 3031 } 3032 } 3033 } 3034 3035 3036 class MessageOrderIterator implements Iterator<Entry<Long, MessageKeys>>{ 3037 Iterator<Entry<Long, MessageKeys>>currentIterator; 3038 final Iterator<Entry<Long, MessageKeys>>highIterator; 3039 final Iterator<Entry<Long, MessageKeys>>defaultIterator; 3040 final Iterator<Entry<Long, MessageKeys>>lowIterator; 3041 3042 MessageOrderIterator(Transaction tx, MessageOrderCursor m, MessageOrderIndex messageOrderIndex) throws IOException { 3043 Long pendingAddLimiter = messageOrderIndex.minPendingAdd(); 3044 this.defaultIterator = defaultPriorityIndex.iterator(tx, m.defaultCursorPosition, pendingAddLimiter); 3045 if (highPriorityIndex != null) { 3046 this.highIterator = highPriorityIndex.iterator(tx, m.highPriorityCursorPosition, pendingAddLimiter); 3047 } else { 3048 this.highIterator = null; 3049 } 3050 if (lowPriorityIndex != null) { 3051 this.lowIterator = lowPriorityIndex.iterator(tx, m.lowPriorityCursorPosition, pendingAddLimiter); 3052 } else { 3053 this.lowIterator = null; 3054 } 3055 } 3056 3057 @Override 3058 public boolean hasNext() { 3059 if (currentIterator == null) { 3060 if (highIterator != null) { 3061 if (highIterator.hasNext()) { 3062 currentIterator = highIterator; 3063 return currentIterator.hasNext(); 3064 } 3065 if (defaultIterator.hasNext()) { 3066 currentIterator = defaultIterator; 3067 return currentIterator.hasNext(); 3068 } 3069 if (lowIterator.hasNext()) { 3070 currentIterator = lowIterator; 3071 
return currentIterator.hasNext(); 3072 } 3073 return false; 3074 } else { 3075 currentIterator = defaultIterator; 3076 return currentIterator.hasNext(); 3077 } 3078 } 3079 if (highIterator != null) { 3080 if (currentIterator.hasNext()) { 3081 return true; 3082 } 3083 if (currentIterator == highIterator) { 3084 if (defaultIterator.hasNext()) { 3085 currentIterator = defaultIterator; 3086 return currentIterator.hasNext(); 3087 } 3088 if (lowIterator.hasNext()) { 3089 currentIterator = lowIterator; 3090 return currentIterator.hasNext(); 3091 } 3092 return false; 3093 } 3094 3095 if (currentIterator == defaultIterator) { 3096 if (lowIterator.hasNext()) { 3097 currentIterator = lowIterator; 3098 return currentIterator.hasNext(); 3099 } 3100 return false; 3101 } 3102 } 3103 return currentIterator.hasNext(); 3104 } 3105 3106 @Override 3107 public Entry<Long, MessageKeys> next() { 3108 Entry<Long, MessageKeys> result = currentIterator.next(); 3109 if (result != null) { 3110 Long key = result.getKey(); 3111 if (highIterator != null) { 3112 if (currentIterator == defaultIterator) { 3113 lastDefaultKey = key; 3114 } else if (currentIterator == highIterator) { 3115 lastHighKey = key; 3116 } else { 3117 lastLowKey = key; 3118 } 3119 } else { 3120 lastDefaultKey = key; 3121 } 3122 } 3123 return result; 3124 } 3125 3126 @Override 3127 public void remove() { 3128 throw new UnsupportedOperationException(); 3129 } 3130 3131 } 3132 } 3133 3134 private static class HashSetStringMarshaller extends VariableMarshaller<HashSet<String>> { 3135 final static HashSetStringMarshaller INSTANCE = new HashSetStringMarshaller(); 3136 3137 @Override 3138 public void writePayload(HashSet<String> object, DataOutput dataOut) throws IOException { 3139 ByteArrayOutputStream baos = new ByteArrayOutputStream(); 3140 ObjectOutputStream oout = new ObjectOutputStream(baos); 3141 oout.writeObject(object); 3142 oout.flush(); 3143 oout.close(); 3144 byte[] data = baos.toByteArray(); 3145 
dataOut.writeInt(data.length); 3146 dataOut.write(data); 3147 } 3148 3149 @Override 3150 @SuppressWarnings("unchecked") 3151 public HashSet<String> readPayload(DataInput dataIn) throws IOException { 3152 int dataLen = dataIn.readInt(); 3153 byte[] data = new byte[dataLen]; 3154 dataIn.readFully(data); 3155 ByteArrayInputStream bais = new ByteArrayInputStream(data); 3156 ObjectInputStream oin = new ObjectInputStream(bais); 3157 try { 3158 return (HashSet<String>) oin.readObject(); 3159 } catch (ClassNotFoundException cfe) { 3160 IOException ioe = new IOException("Failed to read HashSet<String>: " + cfe); 3161 ioe.initCause(cfe); 3162 throw ioe; 3163 } 3164 } 3165 } 3166 3167 public File getIndexDirectory() { 3168 return indexDirectory; 3169 } 3170 3171 public void setIndexDirectory(File indexDirectory) { 3172 this.indexDirectory = indexDirectory; 3173 } 3174 3175 interface IndexAware { 3176 public void sequenceAssignedWithIndexLocked(long index); 3177 } 3178}