001/** 002 * Licensed to the Apache Software Foundation (ASF) under one or more 003 * contributor license agreements. See the NOTICE file distributed with 004 * this work for additional information regarding copyright ownership. 005 * The ASF licenses this file to You under the Apache License, Version 2.0 006 * (the "License"); you may not use this file except in compliance with 007 * the License. You may obtain a copy of the License at 008 * 009 * http://www.apache.org/licenses/LICENSE-2.0 010 * 011 * Unless required by applicable law or agreed to in writing, software 012 * distributed under the License is distributed on an "AS IS" BASIS, 013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 014 * See the License for the specific language governing permissions and 015 * limitations under the License. 016 */ 017package org.apache.activemq.store.kahadb; 018 019import java.io.ByteArrayInputStream; 020import java.io.ByteArrayOutputStream; 021import java.io.DataInput; 022import java.io.DataOutput; 023import java.io.EOFException; 024import java.io.File; 025import java.io.IOException; 026import java.io.InputStream; 027import java.io.InterruptedIOException; 028import java.io.ObjectInputStream; 029import java.io.ObjectOutputStream; 030import java.io.OutputStream; 031import java.util.ArrayList; 032import java.util.Arrays; 033import java.util.Collection; 034import java.util.Collections; 035import java.util.Date; 036import java.util.HashMap; 037import java.util.HashSet; 038import java.util.Iterator; 039import java.util.LinkedHashMap; 040import java.util.LinkedHashSet; 041import java.util.LinkedList; 042import java.util.List; 043import java.util.Map; 044import java.util.Map.Entry; 045import java.util.Set; 046import java.util.SortedSet; 047import java.util.TreeMap; 048import java.util.TreeSet; 049import java.util.concurrent.ConcurrentHashMap; 050import java.util.concurrent.ConcurrentMap; 051import java.util.concurrent.atomic.AtomicBoolean; 052import 
java.util.concurrent.atomic.AtomicLong; 053import java.util.concurrent.locks.ReentrantReadWriteLock; 054 055import org.apache.activemq.ActiveMQMessageAuditNoSync; 056import org.apache.activemq.broker.BrokerService; 057import org.apache.activemq.broker.BrokerServiceAware; 058import org.apache.activemq.broker.region.Destination; 059import org.apache.activemq.broker.region.Queue; 060import org.apache.activemq.broker.region.Topic; 061import org.apache.activemq.command.MessageAck; 062import org.apache.activemq.command.TransactionId; 063import org.apache.activemq.openwire.OpenWireFormat; 064import org.apache.activemq.protobuf.Buffer; 065import org.apache.activemq.store.MessageStore; 066import org.apache.activemq.store.MessageStoreStatistics; 067import org.apache.activemq.store.kahadb.data.KahaAckMessageFileMapCommand; 068import org.apache.activemq.store.kahadb.data.KahaAddMessageCommand; 069import org.apache.activemq.store.kahadb.data.KahaCommitCommand; 070import org.apache.activemq.store.kahadb.data.KahaDestination; 071import org.apache.activemq.store.kahadb.data.KahaEntryType; 072import org.apache.activemq.store.kahadb.data.KahaPrepareCommand; 073import org.apache.activemq.store.kahadb.data.KahaProducerAuditCommand; 074import org.apache.activemq.store.kahadb.data.KahaRemoveDestinationCommand; 075import org.apache.activemq.store.kahadb.data.KahaRemoveMessageCommand; 076import org.apache.activemq.store.kahadb.data.KahaRollbackCommand; 077import org.apache.activemq.store.kahadb.data.KahaSubscriptionCommand; 078import org.apache.activemq.store.kahadb.data.KahaTraceCommand; 079import org.apache.activemq.store.kahadb.data.KahaTransactionInfo; 080import org.apache.activemq.store.kahadb.data.KahaUpdateMessageCommand; 081import org.apache.activemq.store.kahadb.disk.index.BTreeIndex; 082import org.apache.activemq.store.kahadb.disk.index.BTreeVisitor; 083import org.apache.activemq.store.kahadb.disk.index.ListIndex; 084import org.apache.activemq.store.kahadb.disk.journal.DataFile; 
import org.apache.activemq.store.kahadb.disk.journal.Journal;
import org.apache.activemq.store.kahadb.disk.journal.Location;
import org.apache.activemq.store.kahadb.disk.page.Page;
import org.apache.activemq.store.kahadb.disk.page.PageFile;
import org.apache.activemq.store.kahadb.disk.page.Transaction;
import org.apache.activemq.store.kahadb.disk.util.LocationMarshaller;
import org.apache.activemq.store.kahadb.disk.util.LongMarshaller;
import org.apache.activemq.store.kahadb.disk.util.Marshaller;
import org.apache.activemq.store.kahadb.disk.util.Sequence;
import org.apache.activemq.store.kahadb.disk.util.SequenceSet;
import org.apache.activemq.store.kahadb.disk.util.StringMarshaller;
import org.apache.activemq.store.kahadb.disk.util.VariableMarshaller;
import org.apache.activemq.util.ByteSequence;
import org.apache.activemq.util.DataByteArrayInputStream;
import org.apache.activemq.util.DataByteArrayOutputStream;
import org.apache.activemq.util.IOHelper;
import org.apache.activemq.util.ServiceStopper;
import org.apache.activemq.util.ServiceSupport;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Core of the KahaDB persistence engine: owns the append-only journal
 * (write-ahead log) and the page-file based index, and funnels every store
 * update through both so the index can always be rebuilt by journal replay.
 */
public abstract class MessageDatabase extends ServiceSupport implements BrokerServiceAware {

    protected BrokerService brokerService;

    public static final String PROPERTY_LOG_SLOW_ACCESS_TIME = "org.apache.activemq.store.kahadb.LOG_SLOW_ACCESS_TIME";
    // Threshold in ms; 0 (the default) disables slow-access logging.
    public static final int LOG_SLOW_ACCESS_TIME = Integer.getInteger(PROPERTY_LOG_SLOW_ACCESS_TIME, 0);
    public static final File DEFAULT_DIRECTORY = new File("KahaDB");
    protected static final Buffer UNMATCHED;
    static {
        UNMATCHED = new Buffer(new byte[]{});
    }
    private static final Logger LOG = LoggerFactory.getLogger(MessageDatabase.class);

    // Values of Metadata.state: CLOSED_STATE is written on clean shutdown, so
    // finding OPEN_STATE at startup indicates the broker died with the store open.
    static final int CLOSED_STATE = 1;
    static final int OPEN_STATE = 2;
    static final long NOT_ACKED = -1;

    // Current on-disk metadata format version; Metadata.read() tolerates older
    // versions and Metadata.write() upgrades them in place.
    static final int VERSION = 6;

    /**
     * The root record of the index page file (always stored on page 0).
     * Holds the destination index plus the journal locations needed to decide
     * where recovery/replay has to start.
     */
    protected class Metadata {
        protected Page<Metadata> page;
        // OPEN_STATE / CLOSED_STATE, see constants above.
        protected int state;
        protected BTreeIndex<String, StoredDestination> destinations;
        // Journal position of the last update that was applied to the index.
        protected Location lastUpdate;
        // Earliest journal position still referenced by an in-flight transaction.
        protected Location firstInProgressTransactionLocation;
        // Journal positions of the last serialized snapshots of the two
        // transient structures below; null until first checkpointed.
        protected Location producerSequenceIdTrackerLocation = null;
        protected Location ackMessageFileMapLocation = null;
        protected transient ActiveMQMessageAuditNoSync producerSequenceIdTracker = new ActiveMQMessageAuditNoSync();
        protected transient Map<Integer, Set<Integer>> ackMessageFileMap = new HashMap<Integer, Set<Integer>>();
        protected int version = VERSION;
        protected int openwireVersion = OpenWireFormat.DEFAULT_STORE_VERSION;

        /**
         * Deserializes this record from the page file. Fields added in later
         * store versions are read inside EOF guards so metadata written by an
         * older broker still loads (missing trailing fields fall back to
         * upgrade defaults instead of failing).
         */
        public void read(DataInput is) throws IOException {
            state = is.readInt();
            destinations = new BTreeIndex<String, StoredDestination>(pageFile, is.readLong());
            if (is.readBoolean()) {
                lastUpdate = LocationMarshaller.INSTANCE.readPayload(is);
            } else {
                lastUpdate = null;
            }
            if (is.readBoolean()) {
                firstInProgressTransactionLocation = LocationMarshaller.INSTANCE.readPayload(is);
            } else {
                firstInProgressTransactionLocation = null;
            }
            try {
                if (is.readBoolean()) {
                    producerSequenceIdTrackerLocation = LocationMarshaller.INSTANCE.readPayload(is);
                } else {
                    producerSequenceIdTrackerLocation = null;
                }
            } catch (EOFException expectedOnUpgrade) {
                // pre-audit-tracking store: field absent, leave the default
            }
            try {
                version = is.readInt();
            } catch (EOFException expectedOnUpgrade) {
                // stores that predate the version field are treated as version 1
                version = 1;
            }
            // ackMessageFileMapLocation only exists from version 5 onwards.
            if (version >= 5 && is.readBoolean()) {
                ackMessageFileMapLocation = LocationMarshaller.INSTANCE.readPayload(is);
            } else {
                ackMessageFileMapLocation = null;
            }
            try {
                openwireVersion = is.readInt();
            } catch (EOFException expectedOnUpgrade) {
                openwireVersion = OpenWireFormat.DEFAULT_LEGACY_VERSION;
            }
            LOG.info("KahaDB is version " + version);
        }

        /**
         * Serializes this record. Note that the version written is always the
         * current VERSION constant (not this.version), so an old store is
         * upgraded in place on its next metadata write.
         */
        public void write(DataOutput os) throws IOException {
            os.writeInt(state);
            os.writeLong(destinations.getPageId());

            if (lastUpdate != null) {
                os.writeBoolean(true);
                LocationMarshaller.INSTANCE.writePayload(lastUpdate, os);
            } else {
                os.writeBoolean(false);
            }

            if (firstInProgressTransactionLocation != null) {
                os.writeBoolean(true);
                LocationMarshaller.INSTANCE.writePayload(firstInProgressTransactionLocation, os);
            } else {
                os.writeBoolean(false);
            }

            if (producerSequenceIdTrackerLocation != null) {
                os.writeBoolean(true);
                LocationMarshaller.INSTANCE.writePayload(producerSequenceIdTrackerLocation, os);
            } else {
                os.writeBoolean(false);
            }
            os.writeInt(VERSION);
            if (ackMessageFileMapLocation != null) {
                os.writeBoolean(true);
                LocationMarshaller.INSTANCE.writePayload(ackMessageFileMapLocation, os);
            } else {
                os.writeBoolean(false);
            }
            os.writeInt(this.openwireVersion);
        }
    }

    /** Adapts Metadata.read/write to the index's Marshaller contract. */
    class MetadataMarshaller extends VariableMarshaller<Metadata> {
        @Override
        public Metadata readPayload(DataInput dataIn) throws IOException {
            Metadata rc = createMetadata();
            rc.read(dataIn);
            return rc;
        }

        @Override
        public void writePayload(Metadata object, DataOutput dataOut) throws IOException {
            object.write(dataOut);
        }
    }

    protected PageFile pageFile;
    protected Journal journal;
    protected Metadata metadata = new Metadata();

    protected MetadataMarshaller metadataMarshaller = new MetadataMarshaller();

    protected boolean failIfDatabaseIsLocked;

    protected boolean deleteAllMessages;
    protected File directory = DEFAULT_DIRECTORY;
    protected File indexDirectory = null;
    protected Thread checkpointThread;
    protected boolean enableJournalDiskSyncs=true;
    protected boolean archiveDataLogs;
    protected File directoryArchive;
    protected AtomicLong journalSize = new AtomicLong(0);
    // Periods (ms) for the background checkpoint/cleanup worker.
    long checkpointInterval = 5*1000;
    long cleanupInterval = 30*1000;
    int journalMaxFileLength = Journal.DEFAULT_MAX_FILE_LENGTH;
journalMaxFileLength = Journal.DEFAULT_MAX_FILE_LENGTH; 245 int journalMaxWriteBatchSize = Journal.DEFAULT_MAX_WRITE_BATCH_SIZE; 246 boolean enableIndexWriteAsync = false; 247 int setIndexWriteBatchSize = PageFile.DEFAULT_WRITE_BATCH_SIZE; 248 private String preallocationScope = Journal.PreallocationScope.ENTIRE_JOURNAL.name(); 249 private String preallocationStrategy = Journal.PreallocationStrategy.SPARSE_FILE.name(); 250 251 protected AtomicBoolean opened = new AtomicBoolean(); 252 private boolean ignoreMissingJournalfiles = false; 253 private int indexCacheSize = 10000; 254 private boolean checkForCorruptJournalFiles = false; 255 private boolean checksumJournalFiles = true; 256 protected boolean forceRecoverIndex = false; 257 private final Object checkpointThreadLock = new Object(); 258 private boolean archiveCorruptedIndex = false; 259 private boolean useIndexLFRUEviction = false; 260 private float indexLFUEvictionFactor = 0.2f; 261 private boolean enableIndexDiskSyncs = true; 262 private boolean enableIndexRecoveryFile = true; 263 private boolean enableIndexPageCaching = true; 264 ReentrantReadWriteLock checkpointLock = new ReentrantReadWriteLock(); 265 266 @Override 267 public void doStart() throws Exception { 268 load(); 269 } 270 271 @Override 272 public void doStop(ServiceStopper stopper) throws Exception { 273 unload(); 274 } 275 276 private void loadPageFile() throws IOException { 277 this.indexLock.writeLock().lock(); 278 try { 279 final PageFile pageFile = getPageFile(); 280 pageFile.load(); 281 pageFile.tx().execute(new Transaction.Closure<IOException>() { 282 @Override 283 public void execute(Transaction tx) throws IOException { 284 if (pageFile.getPageCount() == 0) { 285 // First time this is created.. 
Initialize the metadata 286 Page<Metadata> page = tx.allocate(); 287 assert page.getPageId() == 0; 288 page.set(metadata); 289 metadata.page = page; 290 metadata.state = CLOSED_STATE; 291 metadata.destinations = new BTreeIndex<String, StoredDestination>(pageFile, tx.allocate().getPageId()); 292 293 tx.store(metadata.page, metadataMarshaller, true); 294 } else { 295 Page<Metadata> page = tx.load(0, metadataMarshaller); 296 metadata = page.get(); 297 metadata.page = page; 298 } 299 metadata.destinations.setKeyMarshaller(StringMarshaller.INSTANCE); 300 metadata.destinations.setValueMarshaller(new StoredDestinationMarshaller()); 301 metadata.destinations.load(tx); 302 } 303 }); 304 // Load up all the destinations since we need to scan all the indexes to figure out which journal files can be deleted. 305 // Perhaps we should just keep an index of file 306 storedDestinations.clear(); 307 pageFile.tx().execute(new Transaction.Closure<IOException>() { 308 @Override 309 public void execute(Transaction tx) throws IOException { 310 for (Iterator<Entry<String, StoredDestination>> iterator = metadata.destinations.iterator(tx); iterator.hasNext();) { 311 Entry<String, StoredDestination> entry = iterator.next(); 312 StoredDestination sd = loadStoredDestination(tx, entry.getKey(), entry.getValue().subscriptions!=null); 313 storedDestinations.put(entry.getKey(), sd); 314 315 if (checkForCorruptJournalFiles) { 316 // sanity check the index also 317 if (!entry.getValue().locationIndex.isEmpty(tx)) { 318 if (entry.getValue().orderIndex.nextMessageId <= 0) { 319 throw new IOException("Detected uninitialized orderIndex nextMessageId with pending messages for " + entry.getKey()); 320 } 321 } 322 } 323 } 324 } 325 }); 326 pageFile.flush(); 327 } finally { 328 this.indexLock.writeLock().unlock(); 329 } 330 } 331 332 private void startCheckpoint() { 333 if (checkpointInterval == 0 && cleanupInterval == 0) { 334 LOG.info("periodic checkpoint/cleanup disabled, will ocurr on clean 
shutdown/restart"); 335 return; 336 } 337 synchronized (checkpointThreadLock) { 338 boolean start = false; 339 if (checkpointThread == null) { 340 start = true; 341 } else if (!checkpointThread.isAlive()) { 342 start = true; 343 LOG.info("KahaDB: Recovering checkpoint thread after death"); 344 } 345 if (start) { 346 checkpointThread = new Thread("ActiveMQ Journal Checkpoint Worker") { 347 @Override 348 public void run() { 349 try { 350 long lastCleanup = System.currentTimeMillis(); 351 long lastCheckpoint = System.currentTimeMillis(); 352 // Sleep for a short time so we can periodically check 353 // to see if we need to exit this thread. 354 long sleepTime = Math.min(checkpointInterval > 0 ? checkpointInterval : cleanupInterval, 500); 355 while (opened.get()) { 356 Thread.sleep(sleepTime); 357 long now = System.currentTimeMillis(); 358 if( cleanupInterval > 0 && (now - lastCleanup >= cleanupInterval) ) { 359 checkpointCleanup(true); 360 lastCleanup = now; 361 lastCheckpoint = now; 362 } else if( checkpointInterval > 0 && (now - lastCheckpoint >= checkpointInterval )) { 363 checkpointCleanup(false); 364 lastCheckpoint = now; 365 } 366 } 367 } catch (InterruptedException e) { 368 // Looks like someone really wants us to exit this thread... 369 } catch (IOException ioe) { 370 LOG.error("Checkpoint failed", ioe); 371 brokerService.handleIOException(ioe); 372 } 373 } 374 }; 375 376 checkpointThread.setDaemon(true); 377 checkpointThread.start(); 378 } 379 } 380 } 381 382 public void open() throws IOException { 383 if( opened.compareAndSet(false, true) ) { 384 getJournal().start(); 385 try { 386 loadPageFile(); 387 } catch (Throwable t) { 388 LOG.warn("Index corrupted. Recovering the index through journal replay. 
Cause:" + t); 389 if (LOG.isDebugEnabled()) { 390 LOG.debug("Index load failure", t); 391 } 392 // try to recover index 393 try { 394 pageFile.unload(); 395 } catch (Exception ignore) {} 396 if (archiveCorruptedIndex) { 397 pageFile.archive(); 398 } else { 399 pageFile.delete(); 400 } 401 metadata = createMetadata(); 402 pageFile = null; 403 loadPageFile(); 404 } 405 startCheckpoint(); 406 recover(); 407 } 408 } 409 410 public void load() throws IOException { 411 this.indexLock.writeLock().lock(); 412 IOHelper.mkdirs(directory); 413 try { 414 if (deleteAllMessages) { 415 getJournal().start(); 416 getJournal().delete(); 417 getJournal().close(); 418 journal = null; 419 getPageFile().delete(); 420 LOG.info("Persistence store purged."); 421 deleteAllMessages = false; 422 } 423 424 open(); 425 store(new KahaTraceCommand().setMessage("LOADED " + new Date())); 426 } finally { 427 this.indexLock.writeLock().unlock(); 428 } 429 } 430 431 public void close() throws IOException, InterruptedException { 432 if( opened.compareAndSet(true, false)) { 433 checkpointLock.writeLock().lock(); 434 try { 435 if (metadata.page != null) { 436 checkpointUpdate(true); 437 } 438 pageFile.unload(); 439 metadata = createMetadata(); 440 } finally { 441 checkpointLock.writeLock().unlock(); 442 } 443 journal.close(); 444 synchronized (checkpointThreadLock) { 445 if (checkpointThread != null) { 446 checkpointThread.join(); 447 } 448 } 449 //clear the cache on shutdown of the store 450 storeCache.clear(); 451 } 452 } 453 454 public void unload() throws IOException, InterruptedException { 455 this.indexLock.writeLock().lock(); 456 try { 457 if( pageFile != null && pageFile.isLoaded() ) { 458 metadata.state = CLOSED_STATE; 459 metadata.firstInProgressTransactionLocation = getInProgressTxLocationRange()[0]; 460 461 if (metadata.page != null) { 462 pageFile.tx().execute(new Transaction.Closure<IOException>() { 463 @Override 464 public void execute(Transaction tx) throws IOException { 465 
tx.store(metadata.page, metadataMarshaller, true); 466 } 467 }); 468 } 469 } 470 } finally { 471 this.indexLock.writeLock().unlock(); 472 } 473 close(); 474 } 475 476 // public for testing 477 @SuppressWarnings("rawtypes") 478 public Location[] getInProgressTxLocationRange() { 479 Location[] range = new Location[]{null, null}; 480 synchronized (inflightTransactions) { 481 if (!inflightTransactions.isEmpty()) { 482 for (List<Operation> ops : inflightTransactions.values()) { 483 if (!ops.isEmpty()) { 484 trackMaxAndMin(range, ops); 485 } 486 } 487 } 488 if (!preparedTransactions.isEmpty()) { 489 for (List<Operation> ops : preparedTransactions.values()) { 490 if (!ops.isEmpty()) { 491 trackMaxAndMin(range, ops); 492 } 493 } 494 } 495 } 496 return range; 497 } 498 499 @SuppressWarnings("rawtypes") 500 private void trackMaxAndMin(Location[] range, List<Operation> ops) { 501 Location t = ops.get(0).getLocation(); 502 if (range[0]==null || t.compareTo(range[0]) <= 0) { 503 range[0] = t; 504 } 505 t = ops.get(ops.size() -1).getLocation(); 506 if (range[1]==null || t.compareTo(range[1]) >= 0) { 507 range[1] = t; 508 } 509 } 510 511 class TranInfo { 512 TransactionId id; 513 Location location; 514 515 class opCount { 516 int add; 517 int remove; 518 } 519 HashMap<KahaDestination, opCount> destinationOpCount = new HashMap<KahaDestination, opCount>(); 520 521 @SuppressWarnings("rawtypes") 522 public void track(Operation operation) { 523 if (location == null ) { 524 location = operation.getLocation(); 525 } 526 KahaDestination destination; 527 boolean isAdd = false; 528 if (operation instanceof AddOperation) { 529 AddOperation add = (AddOperation) operation; 530 destination = add.getCommand().getDestination(); 531 isAdd = true; 532 } else { 533 RemoveOperation removeOpperation = (RemoveOperation) operation; 534 destination = removeOpperation.getCommand().getDestination(); 535 } 536 opCount opCount = destinationOpCount.get(destination); 537 if (opCount == null) { 538 opCount = 
new opCount(); 539 destinationOpCount.put(destination, opCount); 540 } 541 if (isAdd) { 542 opCount.add++; 543 } else { 544 opCount.remove++; 545 } 546 } 547 548 @Override 549 public String toString() { 550 StringBuffer buffer = new StringBuffer(); 551 buffer.append(location).append(";").append(id).append(";\n"); 552 for (Entry<KahaDestination, opCount> op : destinationOpCount.entrySet()) { 553 buffer.append(op.getKey()).append('+').append(op.getValue().add).append(',').append('-').append(op.getValue().remove).append(';'); 554 } 555 return buffer.toString(); 556 } 557 } 558 559 @SuppressWarnings("rawtypes") 560 public String getTransactions() { 561 562 ArrayList<TranInfo> infos = new ArrayList<TranInfo>(); 563 synchronized (inflightTransactions) { 564 if (!inflightTransactions.isEmpty()) { 565 for (Entry<TransactionId, List<Operation>> entry : inflightTransactions.entrySet()) { 566 TranInfo info = new TranInfo(); 567 info.id = entry.getKey(); 568 for (Operation operation : entry.getValue()) { 569 info.track(operation); 570 } 571 infos.add(info); 572 } 573 } 574 } 575 synchronized (preparedTransactions) { 576 if (!preparedTransactions.isEmpty()) { 577 for (Entry<TransactionId, List<Operation>> entry : preparedTransactions.entrySet()) { 578 TranInfo info = new TranInfo(); 579 info.id = entry.getKey(); 580 for (Operation operation : entry.getValue()) { 581 info.track(operation); 582 } 583 infos.add(info); 584 } 585 } 586 } 587 return infos.toString(); 588 } 589 590 /** 591 * Move all the messages that were in the journal into long term storage. We 592 * just replay and do a checkpoint. 
593 * 594 * @throws IOException 595 * @throws IOException 596 * @throws IllegalStateException 597 */ 598 private void recover() throws IllegalStateException, IOException { 599 this.indexLock.writeLock().lock(); 600 try { 601 602 long start = System.currentTimeMillis(); 603 Location producerAuditPosition = recoverProducerAudit(); 604 Location ackMessageFileLocation = recoverAckMessageFileMap(); 605 Location lastIndoubtPosition = getRecoveryPosition(); 606 607 Location recoveryPosition = minimum(producerAuditPosition, ackMessageFileLocation); 608 recoveryPosition = minimum(recoveryPosition, lastIndoubtPosition); 609 610 if (recoveryPosition != null) { 611 int redoCounter = 0; 612 LOG.info("Recovering from the journal @" + recoveryPosition); 613 while (recoveryPosition != null) { 614 try { 615 JournalCommand<?> message = load(recoveryPosition); 616 metadata.lastUpdate = recoveryPosition; 617 process(message, recoveryPosition, lastIndoubtPosition); 618 redoCounter++; 619 } catch (IOException failedRecovery) { 620 if (isIgnoreMissingJournalfiles()) { 621 LOG.debug("Failed to recover data at position:" + recoveryPosition, failedRecovery); 622 // track this dud location 623 journal.corruptRecoveryLocation(recoveryPosition); 624 } else { 625 throw new IOException("Failed to recover data at position:" + recoveryPosition, failedRecovery); 626 } 627 } 628 recoveryPosition = journal.getNextLocation(recoveryPosition); 629 if (LOG.isInfoEnabled() && redoCounter % 100000 == 0) { 630 LOG.info("@" + recoveryPosition + ", " + redoCounter + " entries recovered .."); 631 } 632 } 633 if (LOG.isInfoEnabled()) { 634 long end = System.currentTimeMillis(); 635 LOG.info("Recovery replayed " + redoCounter + " operations from the journal in " + ((end - start) / 1000.0f) + " seconds."); 636 } 637 } 638 639 // We may have to undo some index updates. 
640 pageFile.tx().execute(new Transaction.Closure<IOException>() { 641 @Override 642 public void execute(Transaction tx) throws IOException { 643 recoverIndex(tx); 644 } 645 }); 646 647 // rollback any recovered inflight local transactions, and discard any inflight XA transactions. 648 Set<TransactionId> toRollback = new HashSet<TransactionId>(); 649 Set<TransactionId> toDiscard = new HashSet<TransactionId>(); 650 synchronized (inflightTransactions) { 651 for (Iterator<TransactionId> it = inflightTransactions.keySet().iterator(); it.hasNext(); ) { 652 TransactionId id = it.next(); 653 if (id.isLocalTransaction()) { 654 toRollback.add(id); 655 } else { 656 toDiscard.add(id); 657 } 658 } 659 for (TransactionId tx: toRollback) { 660 if (LOG.isDebugEnabled()) { 661 LOG.debug("rolling back recovered indoubt local transaction " + tx); 662 } 663 store(new KahaRollbackCommand().setTransactionInfo(TransactionIdConversion.convertToLocal(tx)), false, null, null); 664 } 665 for (TransactionId tx: toDiscard) { 666 if (LOG.isDebugEnabled()) { 667 LOG.debug("discarding recovered in-flight XA transaction " + tx); 668 } 669 inflightTransactions.remove(tx); 670 } 671 } 672 673 synchronized (preparedTransactions) { 674 for (TransactionId txId : preparedTransactions.keySet()) { 675 LOG.warn("Recovered prepared XA TX: [{}]", txId); 676 } 677 } 678 679 } finally { 680 this.indexLock.writeLock().unlock(); 681 } 682 } 683 684 @SuppressWarnings("unused") 685 private KahaTransactionInfo createLocalTransactionInfo(TransactionId tx) { 686 return TransactionIdConversion.convertToLocal(tx); 687 } 688 689 private Location minimum(Location producerAuditPosition, 690 Location lastIndoubtPosition) { 691 Location min = null; 692 if (producerAuditPosition != null) { 693 min = producerAuditPosition; 694 if (lastIndoubtPosition != null && lastIndoubtPosition.compareTo(producerAuditPosition) < 0) { 695 min = lastIndoubtPosition; 696 } 697 } else { 698 min = lastIndoubtPosition; 699 } 700 return min; 701 
} 702 703 private Location recoverProducerAudit() throws IOException { 704 if (metadata.producerSequenceIdTrackerLocation != null) { 705 KahaProducerAuditCommand audit = (KahaProducerAuditCommand) load(metadata.producerSequenceIdTrackerLocation); 706 try { 707 ObjectInputStream objectIn = new ObjectInputStream(audit.getAudit().newInput()); 708 int maxNumProducers = getMaxFailoverProducersToTrack(); 709 int maxAuditDepth = getFailoverProducersAuditDepth(); 710 metadata.producerSequenceIdTracker = (ActiveMQMessageAuditNoSync) objectIn.readObject(); 711 metadata.producerSequenceIdTracker.setAuditDepth(maxAuditDepth); 712 metadata.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(maxNumProducers); 713 return journal.getNextLocation(metadata.producerSequenceIdTrackerLocation); 714 } catch (Exception e) { 715 LOG.warn("Cannot recover message audit", e); 716 return journal.getNextLocation(null); 717 } 718 } else { 719 // got no audit stored so got to recreate via replay from start of the journal 720 return journal.getNextLocation(null); 721 } 722 } 723 724 @SuppressWarnings("unchecked") 725 private Location recoverAckMessageFileMap() throws IOException { 726 if (metadata.ackMessageFileMapLocation != null) { 727 KahaAckMessageFileMapCommand audit = (KahaAckMessageFileMapCommand) load(metadata.ackMessageFileMapLocation); 728 try { 729 ObjectInputStream objectIn = new ObjectInputStream(audit.getAckMessageFileMap().newInput()); 730 metadata.ackMessageFileMap = (Map<Integer, Set<Integer>>) objectIn.readObject(); 731 return journal.getNextLocation(metadata.ackMessageFileMapLocation); 732 } catch (Exception e) { 733 LOG.warn("Cannot recover ackMessageFileMap", e); 734 return journal.getNextLocation(null); 735 } 736 } else { 737 // got no ackMessageFileMap stored so got to recreate via replay from start of the journal 738 return journal.getNextLocation(null); 739 } 740 } 741 742 protected void recoverIndex(Transaction tx) throws IOException { 743 long start = 
System.currentTimeMillis(); 744 // It is possible index updates got applied before the journal updates.. 745 // in that case we need to removed references to messages that are not in the journal 746 final Location lastAppendLocation = journal.getLastAppendLocation(); 747 long undoCounter=0; 748 749 // Go through all the destinations to see if they have messages past the lastAppendLocation 750 for (String key : storedDestinations.keySet()) { 751 StoredDestination sd = storedDestinations.get(key); 752 753 final ArrayList<Long> matches = new ArrayList<Long>(); 754 // Find all the Locations that are >= than the last Append Location. 755 sd.locationIndex.visit(tx, new BTreeVisitor.GTEVisitor<Location, Long>(lastAppendLocation) { 756 @Override 757 protected void matched(Location key, Long value) { 758 matches.add(value); 759 } 760 }); 761 762 for (Long sequenceId : matches) { 763 MessageKeys keys = sd.orderIndex.remove(tx, sequenceId); 764 sd.locationIndex.remove(tx, keys.location); 765 sd.messageIdIndex.remove(tx, keys.messageId); 766 metadata.producerSequenceIdTracker.rollback(keys.messageId); 767 undoCounter++; 768 decrementAndSubSizeToStoreStat(key, keys.location.getSize()); 769 // TODO: do we need to modify the ack positions for the pub sub case? 770 } 771 } 772 773 if( undoCounter > 0 ) { 774 // The rolledback operations are basically in flight journal writes. To avoid getting 775 // these the end user should do sync writes to the journal. 776 if (LOG.isInfoEnabled()) { 777 long end = System.currentTimeMillis(); 778 LOG.info("Rolled back " + undoCounter + " messages from the index in " + ((end - start) / 1000.0f) + " seconds."); 779 } 780 } 781 782 undoCounter = 0; 783 start = System.currentTimeMillis(); 784 785 // Lets be extra paranoid here and verify that all the datafiles being referenced 786 // by the indexes still exists. 
        // NOTE(review): continuation of the index-consistency recovery scan whose
        // head (including the declarations of 'tx', 'start' and 'undoCounter') is
        // above this excerpt. It detects journal files referenced by the location
        // index that are missing or corrupt, and either drops the affected index
        // entries or aborts, depending on ignoreMissingJournalfiles.

        // Collect the set of data-file ids referenced by any destination's
        // location index, using a visitor so only the needed pages are loaded.
        final SequenceSet ss = new SequenceSet();
        for (StoredDestination sd : storedDestinations.values()) {
            // Use a visitor to cut down the number of pages that we load
            sd.locationIndex.visit(tx, new BTreeVisitor<Location, Long>() {
                int last = -1;

                @Override
                public boolean isInterestedInKeysBetween(Location first, Location second) {
                    // Only descend into key ranges whose file ids are not yet all recorded.
                    if (first == null) {
                        return !ss.contains(0, second.getDataFileId());
                    } else if (second == null) {
                        return true;
                    } else {
                        return !ss.contains(first.getDataFileId(), second.getDataFileId());
                    }
                }

                @Override
                public void visit(List<Location> keys, List<Long> values) {
                    for (Location l : keys) {
                        int fileId = l.getDataFileId();
                        // Keys arrive in order, so only record a file id on transition.
                        if (last != fileId) {
                            ss.add(fileId);
                            last = fileId;
                        }
                    }
                }

            });
        }

        // Referenced files minus the files actually present on disk = missing files.
        HashSet<Integer> missingJournalFiles = new HashSet<Integer>();
        while (!ss.isEmpty()) {
            missingJournalFiles.add((int) ss.removeFirst());
        }
        missingJournalFiles.removeAll(journal.getFileMap().keySet());

        if (!missingJournalFiles.isEmpty()) {
            if (LOG.isInfoEnabled()) {
                LOG.info("Some journal files are missing: " + missingJournalFiles);
            }
        }

        // Build predicates matching every index location that falls inside a
        // missing file...
        ArrayList<BTreeVisitor.Predicate<Location>> missingPredicates = new ArrayList<BTreeVisitor.Predicate<Location>>();
        for (Integer missing : missingJournalFiles) {
            missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(missing, 0), new Location(missing + 1, 0)));
        }

        // ...and, optionally, every location past the valid end of a file or
        // inside a block the journal flagged as corrupted.
        if (checkForCorruptJournalFiles) {
            Collection<DataFile> dataFiles = journal.getFileMap().values();
            for (DataFile dataFile : dataFiles) {
                int id = dataFile.getDataFileId();
                missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(id, dataFile.getLength()), new Location(id + 1, 0)));
                Sequence seq = dataFile.getCorruptedBlocks().getHead();
                while (seq != null) {
                    missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(id, (int) seq.getFirst()), new Location(id, (int) seq.getLast() + 1)));
                    seq = seq.getNext();
                }
            }
        }

        if (!missingPredicates.isEmpty()) {
            for (Entry<String, StoredDestination> sdEntry : storedDestinations.entrySet()) {
                final StoredDestination sd = sdEntry.getValue();
                final ArrayList<Long> matches = new ArrayList<Long>();
                sd.locationIndex.visit(tx, new BTreeVisitor.OrVisitor<Location, Long>(missingPredicates) {
                    @Override
                    protected void matched(Location key, Long value) {
                        matches.add(value);
                    }
                });

                // If some message references are affected by the missing data files...
                if (!matches.isEmpty()) {

                    // We either 'gracefully' recover dropping the missing messages or
                    // we error out.
                    if (ignoreMissingJournalfiles) {
                        // Update the index to remove the references to the missing data
                        for (Long sequenceId : matches) {
                            MessageKeys keys = sd.orderIndex.remove(tx, sequenceId);
                            sd.locationIndex.remove(tx, keys.location);
                            sd.messageIdIndex.remove(tx, keys.messageId);
                            LOG.info("[" + sdEntry.getKey() + "] dropped: " + keys.messageId + " at corrupt location: " + keys.location);
                            undoCounter++;
                            decrementAndSubSizeToStoreStat(sdEntry.getKey(), keys.location.getSize());
                            // TODO: do we need to modify the ack positions for the pub sub case?
                        }
                    } else {
                        throw new IOException("Detected missing/corrupt journal files. " + matches.size() + " messages affected.");
                    }
                }
            }
        }

        if (undoCounter > 0) {
            // The rolledback operations are basically in flight journal writes. To avoid getting these the end user
            // should do sync writes to the journal.
            if (LOG.isInfoEnabled()) {
                long end = System.currentTimeMillis();
                LOG.info("Detected missing/corrupt journal files. Dropped " + undoCounter + " messages from the index in " + ((end - start) / 1000.0f) + " seconds.");
            }
        }
    }

    // Cursor state for incremental recovery; guarded by indexLock's write lock.
    private Location nextRecoveryPosition;
    private Location lastRecoveryPosition;

    /**
     * Replays journal records one batch at a time, starting from where the last
     * call left off (or from {@link #getRecoveryPosition()} on the first call),
     * applying each record to the index under the index write lock.
     */
    public void incrementalRecover() throws IOException {
        this.indexLock.writeLock().lock();
        try {
            if (nextRecoveryPosition == null) {
                if (lastRecoveryPosition == null) {
                    nextRecoveryPosition = getRecoveryPosition();
                } else {
                    nextRecoveryPosition = journal.getNextLocation(lastRecoveryPosition);
                }
            }
            while (nextRecoveryPosition != null) {
                lastRecoveryPosition = nextRecoveryPosition;
                metadata.lastUpdate = lastRecoveryPosition;
                JournalCommand<?> message = load(lastRecoveryPosition);
                process(message, lastRecoveryPosition, (IndexAware) null);
                nextRecoveryPosition = journal.getNextLocation(lastRecoveryPosition);
            }
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /** @return the journal location of the last update applied to the index. */
    public Location getLastUpdatePosition() throws IOException {
        return metadata.lastUpdate;
    }

    /**
     * Determines where journal replay should begin: at the first in-progress
     * transaction if one exists, otherwise just after the last record already
     * reflected in the index, otherwise (or when forceRecoverIndex is set) at
     * the very first journal record.
     */
    private Location getRecoveryPosition() throws IOException {

        if (!this.forceRecoverIndex) {

            // If we need to recover the transactions..
            if (metadata.firstInProgressTransactionLocation != null) {
                return metadata.firstInProgressTransactionLocation;
            }

            // Perhaps there were no transactions...
            if (metadata.lastUpdate != null) {
                // Start replay at the record after the last one recorded in the index file.
                return metadata.lastUpdate == null ? null : journal.getNextLocation(metadata.lastUpdate);
            }
        }
        // This loads the first position.
        return journal.getNextLocation(null);
    }

    /**
     * Runs a checkpoint (optionally with journal-file cleanup) unless the store
     * has been closed. The opened check is done under the index write lock so a
     * concurrent close cannot race us into checkpointing a closed store.
     *
     * @param cleanup true to also garbage-collect unreferenced journal files.
     */
    protected void checkpointCleanup(final boolean cleanup) throws IOException {
        long start;
        this.indexLock.writeLock().lock();
        try {
            start = System.currentTimeMillis();
            if (!opened.get()) {
                return;
            }
        } finally {
            this.indexLock.writeLock().unlock();
        }
        checkpointUpdate(cleanup);
        long end = System.currentTimeMillis();
        if (LOG_SLOW_ACCESS_TIME > 0 && end - start > LOG_SLOW_ACCESS_TIME) {
            if (LOG.isInfoEnabled()) {
                LOG.info("Slow KahaDB access: cleanup took " + (end - start));
            }
        }
    }

    /**
     * Serializes a journal command to the on-disk framed form: a single type
     * byte followed by the framed protobuf payload.
     */
    public ByteSequence toByteSequence(JournalCommand<?> data) throws IOException {
        int size = data.serializedSizeFramed();
        DataByteArrayOutputStream os = new DataByteArrayOutputStream(size + 1);
        os.writeByte(data.type().getNumber());
        data.writeFramed(os);
        return os.toByteSequence();
    }

    // /////////////////////////////////////////////////////////////////
    // Methods call by the broker to update and query the store.
    // /////////////////////////////////////////////////////////////////

    /** Stores asynchronously (no sync, no callbacks). */
    public Location store(JournalCommand<?> data) throws IOException {
        return store(data, false, null, null);
    }

    /** Stores with a journal-write completion callback instead of a disk sync. */
    public Location store(JournalCommand<?> data, Runnable onJournalStoreComplete) throws IOException {
        return store(data, false, null, null, onJournalStoreComplete);
    }

    /** Stores with optional sync and before/after index callbacks. */
    public Location store(JournalCommand<?> data, boolean sync, IndexAware before, Runnable after) throws IOException {
        return store(data, sync, before, after, null);
    }

    /**
     * All updates are funneled through this method. The updates are converted
     * to a JournalMessage which is logged to the journal and then the data from
     * the JournalMessage is used to update the index just like it would be done
     * during a recovery process.
988 */ 989 public Location store(JournalCommand<?> data, boolean sync, IndexAware before, Runnable after, Runnable onJournalStoreComplete) throws IOException { 990 try { 991 ByteSequence sequence = toByteSequence(data); 992 993 Location location; 994 checkpointLock.readLock().lock(); 995 try { 996 997 long start = System.currentTimeMillis(); 998 location = onJournalStoreComplete == null ? journal.write(sequence, sync) : journal.write(sequence, onJournalStoreComplete) ; 999 long start2 = System.currentTimeMillis(); 1000 process(data, location, before); 1001 1002 long end = System.currentTimeMillis(); 1003 if( LOG_SLOW_ACCESS_TIME>0 && end-start > LOG_SLOW_ACCESS_TIME) { 1004 if (LOG.isInfoEnabled()) { 1005 LOG.info("Slow KahaDB access: Journal append took: "+(start2-start)+" ms, Index update took "+(end-start2)+" ms"); 1006 } 1007 } 1008 1009 } finally{ 1010 checkpointLock.readLock().unlock(); 1011 } 1012 if (after != null) { 1013 after.run(); 1014 } 1015 1016 if (checkpointThread != null && !checkpointThread.isAlive() && opened.get()) { 1017 startCheckpoint(); 1018 } 1019 return location; 1020 } catch (IOException ioe) { 1021 LOG.error("KahaDB failed to store to Journal", ioe); 1022 brokerService.handleIOException(ioe); 1023 throw ioe; 1024 } 1025 } 1026 1027 /** 1028 * Loads a previously stored JournalMessage 1029 * 1030 * @param location 1031 * @return 1032 * @throws IOException 1033 */ 1034 public JournalCommand<?> load(Location location) throws IOException { 1035 long start = System.currentTimeMillis(); 1036 ByteSequence data = journal.read(location); 1037 long end = System.currentTimeMillis(); 1038 if( LOG_SLOW_ACCESS_TIME>0 && end-start > LOG_SLOW_ACCESS_TIME) { 1039 if (LOG.isInfoEnabled()) { 1040 LOG.info("Slow KahaDB access: Journal read took: "+(end-start)+" ms"); 1041 } 1042 } 1043 DataByteArrayInputStream is = new DataByteArrayInputStream(data); 1044 byte readByte = is.readByte(); 1045 KahaEntryType type = KahaEntryType.valueOf(readByte); 1046 if( type 
== null ) { 1047 try { 1048 is.close(); 1049 } catch (IOException e) {} 1050 throw new IOException("Could not load journal record. Invalid location: "+location); 1051 } 1052 JournalCommand<?> message = (JournalCommand<?>)type.createMessage(); 1053 message.mergeFramed(is); 1054 return message; 1055 } 1056 1057 /** 1058 * do minimal recovery till we reach the last inDoubtLocation 1059 * @param data 1060 * @param location 1061 * @param inDoubtlocation 1062 * @throws IOException 1063 */ 1064 void process(JournalCommand<?> data, final Location location, final Location inDoubtlocation) throws IOException { 1065 if (inDoubtlocation != null && location.compareTo(inDoubtlocation) >= 0) { 1066 if (data instanceof KahaSubscriptionCommand) { 1067 KahaSubscriptionCommand kahaSubscriptionCommand = (KahaSubscriptionCommand)data; 1068 if (kahaSubscriptionCommand.hasSubscriptionInfo()) { 1069 // needs to be processed via activate and will be replayed on reconnect 1070 LOG.debug("ignoring add sub command during recovery replay:" + data); 1071 return; 1072 } 1073 } 1074 process(data, location, (IndexAware) null); 1075 } else { 1076 // just recover producer audit 1077 data.visit(new Visitor() { 1078 @Override 1079 public void visit(KahaAddMessageCommand command) throws IOException { 1080 metadata.producerSequenceIdTracker.isDuplicate(command.getMessageId()); 1081 } 1082 }); 1083 } 1084 } 1085 1086 // ///////////////////////////////////////////////////////////////// 1087 // Journaled record processing methods. Once the record is journaled, 1088 // these methods handle applying the index updates. 
    // /////////////////////////////////////////////////////////////////
    // Journaled record processing methods. Once the record is journaled,
    // these methods handle applying the index updates. These may be called
    // from the recovery method too so they need to be idempotent
    // /////////////////////////////////////////////////////////////////

    /**
     * Dispatches a journaled command to its type-specific process(...) overload.
     * Pure audit/trace commands only advance metadata.lastUpdate.
     */
    void process(JournalCommand<?> data, final Location location, final IndexAware onSequenceAssignedCallback) throws IOException {
        data.visit(new Visitor() {
            @Override
            public void visit(KahaAddMessageCommand command) throws IOException {
                process(command, location, onSequenceAssignedCallback);
            }

            @Override
            public void visit(KahaRemoveMessageCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaPrepareCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaCommitCommand command) throws IOException {
                process(command, location, onSequenceAssignedCallback);
            }

            @Override
            public void visit(KahaRollbackCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaRemoveDestinationCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaSubscriptionCommand command) throws IOException {
                process(command, location);
            }

            @Override
            public void visit(KahaProducerAuditCommand command) throws IOException {
                processLocation(location);
            }

            @Override
            public void visit(KahaAckMessageFileMapCommand command) throws IOException {
                processLocation(location);
            }

            @Override
            public void visit(KahaTraceCommand command) {
                processLocation(location);
            }

            @Override
            public void visit(KahaUpdateMessageCommand command) throws IOException {
                process(command, location);
            }
        });
    }

    /**
     * Applies an add-message command. If it is part of a transaction the
     * operation is parked on the in-flight transaction list; otherwise the
     * index is updated immediately under the index write lock, and the callback
     * (if any) is notified of the assigned sequence while the lock is held.
     */
    @SuppressWarnings("rawtypes")
    protected void process(final KahaAddMessageCommand command, final Location location, final IndexAware runWithIndexLock) throws IOException {
        if (command.hasTransactionInfo()) {
            List<Operation> inflightTx = getInflightTx(command.getTransactionInfo());
            inflightTx.add(new AddOperation(command, location, runWithIndexLock));
        } else {
            this.indexLock.writeLock().lock();
            try {
                pageFile.tx().execute(new Transaction.Closure<IOException>() {
                    @Override
                    public void execute(Transaction tx) throws IOException {
                        long assignedIndex = updateIndex(tx, command, location);
                        if (runWithIndexLock != null) {
                            runWithIndexLock.sequenceAssignedWithIndexLocked(assignedIndex);
                        }
                    }
                });

            } finally {
                this.indexLock.writeLock().unlock();
            }
        }
    }

    /** Applies an update-message command to the index under the index write lock. */
    protected void process(final KahaUpdateMessageCommand command, final Location location) throws IOException {
        this.indexLock.writeLock().lock();
        try {
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    updateIndex(tx, command, location);
                }
            });
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /**
     * Applies a remove (ack) command: parked on the in-flight transaction when
     * transactional, otherwise applied to the index immediately.
     */
    @SuppressWarnings("rawtypes")
    protected void process(final KahaRemoveMessageCommand command, final Location location) throws IOException {
        if (command.hasTransactionInfo()) {
            List<Operation> inflightTx = getInflightTx(command.getTransactionInfo());
            inflightTx.add(new RemoveOperation(command, location));
        } else {
            this.indexLock.writeLock().lock();
            try {
                pageFile.tx().execute(new Transaction.Closure<IOException>() {
                    @Override
                    public void execute(Transaction tx) throws IOException {
                        updateIndex(tx, command, location);
                    }
                });
            } finally {
                this.indexLock.writeLock().unlock();
            }
        }
    }

    /** Applies a destination removal to the index under the index write lock. */
    protected void process(final KahaRemoveDestinationCommand command, final Location location) throws IOException {
        this.indexLock.writeLock().lock();
        try {
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    updateIndex(tx, command, location);
                }
            });
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /** Applies a subscription add/remove to the index under the index write lock. */
    protected void process(final KahaSubscriptionCommand command, final Location location) throws IOException {
        this.indexLock.writeLock().lock();
        try {
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    updateIndex(tx, command, location);
                }
            });
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /** Records the location as the latest index update; no index change needed. */
    protected void processLocation(final Location location) {
        this.indexLock.writeLock().lock();
        try {
            metadata.lastUpdate = location;
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /**
     * Commits a transaction: its parked operations (in-flight or prepared) are
     * executed against the index in one page-file transaction. If nothing was
     * parked, only the 'before' callback is notified (with sequence -1).
     */
    @SuppressWarnings("rawtypes")
    protected void process(KahaCommitCommand command, final Location location, final IndexAware before) throws IOException {
        TransactionId key = TransactionIdConversion.convert(command.getTransactionInfo());
        List<Operation> inflightTx;
        synchronized (inflightTransactions) {
            inflightTx = inflightTransactions.remove(key);
            if (inflightTx == null) {
                inflightTx = preparedTransactions.remove(key);
            }
        }
        if (inflightTx == null) {
            // only non persistent messages in this tx
            if (before != null) {
                before.sequenceAssignedWithIndexLocked(-1);
            }
            return;
        }

        final List<Operation> messagingTx = inflightTx;
        indexLock.writeLock().lock();
        try {
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                @Override
                public void execute(Transaction tx) throws IOException {
                    for (Operation op : messagingTx) {
                        op.execute(tx);
                    }
                }
            });
            metadata.lastUpdate = location;
        } finally {
            indexLock.writeLock().unlock();
        }
    }

    /** Moves a transaction's parked operations from in-flight to prepared (XA prepare). */
    @SuppressWarnings("rawtypes")
    protected void process(KahaPrepareCommand command, Location location) {
        TransactionId key = TransactionIdConversion.convert(command.getTransactionInfo());
        synchronized (inflightTransactions) {
            List<Operation> tx = inflightTransactions.remove(key);
            if (tx != null) {
                preparedTransactions.put(key, tx);
            }
        }
    }

    /**
     * Rolls back a transaction by discarding its parked operations. The removed
     * list is intentionally unused — dropping it is the rollback.
     */
    @SuppressWarnings("rawtypes")
    protected void process(KahaRollbackCommand command, Location location) throws IOException {
        TransactionId key = TransactionIdConversion.convert(command.getTransactionInfo());
        List<Operation> updates = null;
        synchronized (inflightTransactions) {
            updates = inflightTransactions.remove(key);
            if (updates == null) {
                updates = preparedTransactions.remove(key);
            }
        }
    }

    // /////////////////////////////////////////////////////////////////
    // These methods do the actual index updates.
    // /////////////////////////////////////////////////////////////////

    // Guards all index reads/writes; checkpointLock orders checkpoints against stores.
    protected final ReentrantReadWriteLock indexLock = new ReentrantReadWriteLock();
    // Journal files that must not be GC'd while replication is copying them.
    private final HashSet<Integer> journalFilesBeingReplicated = new HashSet<Integer>();

    /**
     * Adds a message to a destination's indexes (location, messageId, order),
     * idempotently so recovery replay of the same record is harmless.
     *
     * @return the assigned order-index sequence, or -1 when the add was skipped
     *         (topic with no subscriptions, or a rejected/ignored duplicate).
     */
    long updateIndex(Transaction tx, KahaAddMessageCommand command, Location location) throws IOException {
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);

        // Skip adding the message to the index if this is a topic and there are
        // no subscriptions.
        if (sd.subscriptions != null && sd.subscriptions.isEmpty(tx)) {
            return -1;
        }

        // Add the message.
        int priority = command.getPrioritySupported() ? command.getPriority() : javax.jms.Message.DEFAULT_PRIORITY;
        long id = sd.orderIndex.getNextMessageId(priority);
        Long previous = sd.locationIndex.put(tx, location, id);
        if (previous == null) {
            previous = sd.messageIdIndex.put(tx, command.getMessageId(), id);
            if (previous == null) {
                incrementAndAddSizeToStoreStat(command.getDestination(), location.getSize());
                sd.orderIndex.put(tx, priority, id, new MessageKeys(command.getMessageId(), location));
                if (sd.subscriptions != null && !sd.subscriptions.isEmpty(tx)) {
                    addAckLocationForNewMessage(tx, sd, id);
                }
                metadata.lastUpdate = location;
            } else {

                MessageKeys messageKeys = sd.orderIndex.get(tx, previous);
                if (messageKeys != null && messageKeys.location.compareTo(location) < 0) {
                    // If the message ID is indexed, then the broker asked us to store a duplicate before the message was dispatched and acked, we ignore this add attempt
                    LOG.warn("Duplicate message add attempt rejected. Destination: {}://{}, Message id: {}", command.getDestination().getType(), command.getDestination().getName(), command.getMessageId());
                }
                // Undo the tentative entries made above for the duplicate.
                sd.messageIdIndex.put(tx, command.getMessageId(), previous);
                sd.locationIndex.remove(tx, location);
                id = -1;
            }
        } else {
            // restore the previous value.. Looks like this was a redo of a previously
            // added message. We don't want to assign it a new id as the other indexes would
            // be wrong..
            sd.locationIndex.put(tx, location, previous);
            metadata.lastUpdate = location;
        }
        // record this id in any event, initial send or recovery
        metadata.producerSequenceIdTracker.isDuplicate(command.getMessageId());

        return id;
    }

    /** Notes a pending (in-flight) add for the destination, if it is loaded. */
    void trackPendingAdd(KahaDestination destination, Long seq) {
        StoredDestination sd = storedDestinations.get(key(destination));
        if (sd != null) {
            sd.trackPendingAdd(seq);
        }
    }

    /** Notes completion of a pending add for the destination, if it is loaded. */
    void trackPendingAddComplete(KahaDestination destination, Long seq) {
        StoredDestination sd = storedDestinations.get(key(destination));
        if (sd != null) {
            sd.trackPendingAddComplete(seq);
        }
    }

    /**
     * Re-points an existing message's index entries at the updated record's
     * location; unknown message ids are rejected with a warning.
     */
    void updateIndex(Transaction tx, KahaUpdateMessageCommand updateMessageCommand, Location location) throws IOException {
        KahaAddMessageCommand command = updateMessageCommand.getMessage();
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);

        Long id = sd.messageIdIndex.get(tx, command.getMessageId());
        if (id != null) {
            MessageKeys previousKeys = sd.orderIndex.put(
                    tx,
                    command.getPrioritySupported() ? command.getPriority() : javax.jms.Message.DEFAULT_PRIORITY,
                    id,
                    new MessageKeys(command.getMessageId(), location)
            );
            sd.locationIndex.put(tx, location, id);
            incrementAndAddSizeToStoreStat(command.getDestination(), location.getSize());
            // on first update previous is original location, on recovery/replay it may be the updated location
            if (previousKeys != null && !previousKeys.location.equals(location)) {
                sd.locationIndex.remove(tx, previousKeys.location);
                decrementAndSubSizeToStoreStat(command.getDestination(), previousKeys.location.getSize());
            }
            metadata.lastUpdate = location;
        } else {
            LOG.warn("Non existent message update attempt rejected. Destination: {}://{}, Message id: {}", command.getDestination().getType(), command.getDestination().getName(), command.getMessageId());
        }
    }

    /**
     * Applies a message ack. Queue case: the message is removed from all three
     * indexes. Topic case: the subscription's last-ack is advanced and the
     * message is only deleted once no subscription still references it.
     */
    void updateIndex(Transaction tx, KahaRemoveMessageCommand command, Location ackLocation) throws IOException {
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
        if (!command.hasSubscriptionKey()) {

            // In the queue case we just remove the message from the index..
            Long sequenceId = sd.messageIdIndex.remove(tx, command.getMessageId());
            if (sequenceId != null) {
                MessageKeys keys = sd.orderIndex.remove(tx, sequenceId);
                if (keys != null) {
                    sd.locationIndex.remove(tx, keys.location);
                    decrementAndSubSizeToStoreStat(command.getDestination(), keys.location.getSize());
                    recordAckMessageReferenceLocation(ackLocation, keys.location);
                    metadata.lastUpdate = ackLocation;
                } else if (LOG.isDebugEnabled()) {
                    LOG.debug("message not found in order index: " + sequenceId + " for: " + command.getMessageId());
                }
            } else if (LOG.isDebugEnabled()) {
                LOG.debug("message not found in sequence id index: " + command.getMessageId());
            }
        } else {
            // In the topic case we need remove the message once it's been acked
            // by all the subs
            Long sequence = sd.messageIdIndex.get(tx, command.getMessageId());

            // Make sure it's a valid message id...
            if (sequence != null) {
                String subscriptionKey = command.getSubscriptionKey();
                if (command.getAck() != UNMATCHED) {
                    sd.orderIndex.get(tx, sequence);
                    byte priority = sd.orderIndex.lastGetPriority();
                    sd.subscriptionAcks.put(tx, subscriptionKey, new LastAck(sequence, priority));
                }

                MessageKeys keys = sd.orderIndex.get(tx, sequence);
                if (keys != null) {
                    recordAckMessageReferenceLocation(ackLocation, keys.location);
                }
                // The following method handles deleting un-referenced messages.
                removeAckLocation(command, tx, sd, subscriptionKey, sequence);
                metadata.lastUpdate = ackLocation;
            } else if (LOG.isDebugEnabled()) {
                LOG.debug("no message sequence exists for id: " + command.getMessageId() + " and sub: " + command.getSubscriptionKey());
            }

        }
    }

    /**
     * Records that the journal file containing the ack references the journal
     * file containing the acked message, so GC won't delete a message file
     * whose acks still live in an active file.
     */
    private void recordAckMessageReferenceLocation(Location ackLocation, Location messageLocation) {
        Set<Integer> referenceFileIds = metadata.ackMessageFileMap.get(Integer.valueOf(ackLocation.getDataFileId()));
        if (referenceFileIds == null) {
            referenceFileIds = new HashSet<Integer>();
            referenceFileIds.add(messageLocation.getDataFileId());
            metadata.ackMessageFileMap.put(ackLocation.getDataFileId(), referenceFileIds);
        } else {
            Integer id = Integer.valueOf(messageLocation.getDataFileId());
            if (!referenceFileIds.contains(id)) {
                referenceFileIds.add(id);
            }
        }
    }

    /**
     * Deletes a destination: clears and frees all of its index structures and
     * removes it from the destination map, stats and store cache.
     */
    void updateIndex(Transaction tx, KahaRemoveDestinationCommand command, Location location) throws IOException {
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
        sd.orderIndex.remove(tx);

        sd.locationIndex.clear(tx);
        sd.locationIndex.unload(tx);
        tx.free(sd.locationIndex.getPageId());

        sd.messageIdIndex.clear(tx);
        sd.messageIdIndex.unload(tx);
        tx.free(sd.messageIdIndex.getPageId());

        if (sd.subscriptions != null) {
            sd.subscriptions.clear(tx);
            sd.subscriptions.unload(tx);
            tx.free(sd.subscriptions.getPageId());

            sd.subscriptionAcks.clear(tx);
            sd.subscriptionAcks.unload(tx);
            tx.free(sd.subscriptionAcks.getPageId());

            sd.ackPositions.clear(tx);
            sd.ackPositions.unload(tx);
            tx.free(sd.ackPositions.getHeadPageId());

            sd.subLocations.clear(tx);
            sd.subLocations.unload(tx);
            tx.free(sd.subLocations.getHeadPageId());
        }

        String key = key(command.getDestination());
        storedDestinations.remove(key);
        metadata.destinations.remove(tx, key);
        clearStoreStats(command.getDestination());
        storeCache.remove(key(command.getDestination()));
    }

    /**
     * Creates or destroys a durable subscription on a topic. When the last
     * subscription goes, the backing destination itself is removed.
     */
    void updateIndex(Transaction tx, KahaSubscriptionCommand command, Location location) throws IOException {
        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
        final String subscriptionKey = command.getSubscriptionKey();

        // If set then we are creating it.. otherwise we are destroying the sub
        if (command.hasSubscriptionInfo()) {
            sd.subscriptions.put(tx, subscriptionKey, command);
            sd.subLocations.put(tx, subscriptionKey, location);
            long ackLocation = NOT_ACKED;
            if (!command.getRetroactive()) {
                // Non-retroactive subs start acked up to the latest message.
                ackLocation = sd.orderIndex.nextMessageId - 1;
            } else {
                addAckLocationForRetroactiveSub(tx, sd, subscriptionKey);
            }
            sd.subscriptionAcks.put(tx, subscriptionKey, new LastAck(ackLocation));
            sd.subscriptionCache.add(subscriptionKey);
        } else {
            // delete the sub...
            sd.subscriptions.remove(tx, subscriptionKey);
            sd.subLocations.remove(tx, subscriptionKey);
            sd.subscriptionAcks.remove(tx, subscriptionKey);
            sd.subscriptionCache.remove(subscriptionKey);
            removeAckLocationsForSub(command, tx, sd, subscriptionKey);

            if (sd.subscriptions.isEmpty(tx)) {
                // remove the stored destination
                KahaRemoveDestinationCommand removeDestinationCommand = new KahaRemoveDestinationCommand();
                removeDestinationCommand.setDestination(command.getDestination());
                updateIndex(tx, removeDestinationCommand, null);
                clearStoreStats(command.getDestination());
            }
        }
    }

    /**
     * Runs a checkpoint inside a page-file transaction, excluding concurrent
     * stores via the checkpoint write lock (taken before the index lock —
     * matching the lock order used by store()).
     */
    private void checkpointUpdate(final boolean cleanup) throws IOException {
        checkpointLock.writeLock().lock();
        try {
            this.indexLock.writeLock().lock();
            try {
                pageFile.tx().execute(new Transaction.Closure<IOException>() {
                    @Override
                    public void execute(Transaction tx) throws IOException {
                        checkpointUpdate(tx, cleanup);
                    }
                });
            } finally {
                this.indexLock.writeLock().unlock();
            }

        } finally {
            checkpointLock.writeLock().unlock();
        }
    }

    /**
     * Flushes metadata (producer audit, ack-file map, in-progress tx range) to
     * the page file and, when cleanup is requested, garbage-collects journal
     * files that nothing still references.
     *
     * @param tx the page-file transaction to run under
     * @param cleanup true to also compute and remove unreferenced journal files
     * @throws IOException on journal or page-file failure
     */
    void checkpointUpdate(Transaction tx, boolean cleanup) throws IOException {
        LOG.debug("Checkpoint started.");

        // reflect last update exclusive of current checkpoint
        Location lastUpdate = metadata.lastUpdate;

        metadata.state = OPEN_STATE;
        metadata.producerSequenceIdTrackerLocation = checkpointProducerAudit();
        metadata.ackMessageFileMapLocation = checkpointAckMessageFileMap();
        Location[] inProgressTxRange = getInProgressTxLocationRange();
        metadata.firstInProgressTransactionLocation = inProgressTxRange[0];
        tx.store(metadata.page, metadataMarshaller, true);
        pageFile.flush();

        if (cleanup) {

            // Start by assuming every journal file is collectible, then strike
            // out each file that anything still references.
            final TreeSet<Integer> completeFileSet = new TreeSet<Integer>(journal.getFileMap().keySet());
            final TreeSet<Integer> gcCandidateSet = new TreeSet<Integer>(completeFileSet);

            if (LOG.isTraceEnabled()) {
                LOG.trace("Last update: " + lastUpdate + ", full gc candidates set: " + gcCandidateSet);
            }

            if (lastUpdate != null) {
                gcCandidateSet.remove(lastUpdate.getDataFileId());
            }

            // Don't GC files under replication
            if (journalFilesBeingReplicated != null) {
                gcCandidateSet.removeAll(journalFilesBeingReplicated);
            }

            if (metadata.producerSequenceIdTrackerLocation != null) {
                int dataFileId = metadata.producerSequenceIdTrackerLocation.getDataFileId();
                if (gcCandidateSet.contains(dataFileId) && gcCandidateSet.first() == dataFileId) {
                    // rewrite so we don't prevent gc
                    metadata.producerSequenceIdTracker.setModified(true);
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("rewriting producerSequenceIdTracker:" + metadata.producerSequenceIdTrackerLocation);
                    }
                }
                gcCandidateSet.remove(dataFileId);
                if (LOG.isTraceEnabled()) {
                    LOG.trace("gc candidates after producerSequenceIdTrackerLocation:" + dataFileId + ", " + gcCandidateSet);
                }
            }

            if (metadata.ackMessageFileMapLocation != null) {
                int dataFileId = metadata.ackMessageFileMapLocation.getDataFileId();
                gcCandidateSet.remove(dataFileId);
                if (LOG.isTraceEnabled()) {
                    LOG.trace("gc candidates after ackMessageFileMapLocation:" + dataFileId + ", " + gcCandidateSet);
                }
            }

            // Don't GC files referenced by in-progress tx
            if (inProgressTxRange[0] != null) {
                for (int pendingTx = inProgressTxRange[0].getDataFileId(); pendingTx <= inProgressTxRange[1].getDataFileId(); pendingTx++) {
                    gcCandidateSet.remove(pendingTx);
                }
            }
            if (LOG.isTraceEnabled()) {
                LOG.trace("gc candidates after tx range:" + Arrays.asList(inProgressTxRange) + ", " + gcCandidateSet);
            }

            // Go through all the destinations to see if any of them can remove GC candidates.
            for (Entry<String, StoredDestination> entry : storedDestinations.entrySet()) {
                if (gcCandidateSet.isEmpty()) {
                    break;
                }

                // Use a visitor to cut down the number of pages that we load
                entry.getValue().locationIndex.visit(tx, new BTreeVisitor<Location, Long>() {
                    int last = -1;
                    @Override
                    public boolean isInterestedInKeysBetween(Location first, Location second) {
                        // Only descend when the range still overlaps a gc candidate
                        // that isn't one of the (exclusive) range endpoints.
                        if (first == null) {
                            SortedSet<Integer> subset = gcCandidateSet.headSet(second.getDataFileId() + 1);
                            if (!subset.isEmpty() && subset.last() == second.getDataFileId()) {
                                subset.remove(second.getDataFileId());
                            }
                            return !subset.isEmpty();
                        } else if (second == null) {
                            SortedSet<Integer> subset = gcCandidateSet.tailSet(first.getDataFileId());
                            if (!subset.isEmpty() && subset.first() == first.getDataFileId()) {
                                subset.remove(first.getDataFileId());
                            }
                            return !subset.isEmpty();
                        } else {
                            SortedSet<Integer> subset = gcCandidateSet.subSet(first.getDataFileId(), second.getDataFileId() + 1);
                            if (!subset.isEmpty() && subset.first() == first.getDataFileId()) {
                                subset.remove(first.getDataFileId());
                            }
                            if (!subset.isEmpty() && subset.last() == second.getDataFileId()) {
                                subset.remove(second.getDataFileId());
                            }
                            return !subset.isEmpty();
                        }
                    }

                    @Override
                    public void visit(List<Location> keys, List<Long> values) {
                        for (Location l : keys) {
                            int fileId = l.getDataFileId();
                            if (last != fileId) {
                                // A live message lives in this file: not collectible.
                                gcCandidateSet.remove(fileId);
                                last = fileId;
                            }
                        }
                    }
                });

                // Durable Subscription
                if (entry.getValue().subLocations != null) {
                    Iterator<Entry<String, Location>> iter = entry.getValue().subLocations.iterator(tx);
                    while (iter.hasNext()) {
                        Entry<String, Location> subscription = iter.next();
                        int dataFileId = subscription.getValue().getDataFileId();

                        // Move subscription along if it has no outstanding messages that need ack'd
                        // and its in the last log file in the journal.
                        if (!gcCandidateSet.isEmpty() && gcCandidateSet.first() == dataFileId) {
                            final StoredDestination destination = entry.getValue();
                            final String subscriptionKey = subscription.getKey();
                            SequenceSet pendingAcks = destination.ackPositions.get(tx, subscriptionKey);

                            // When pending is size one that is the next message Id meaning there
                            // are no pending messages currently.
                            if (pendingAcks == null || pendingAcks.size() <= 1) {
                                if (LOG.isTraceEnabled()) {
                                    LOG.trace("Found candidate for rewrite: {} from file {}", entry.getKey(), dataFileId);
                                }

                                final KahaSubscriptionCommand kahaSub =
                                    destination.subscriptions.get(tx, subscriptionKey);
                                destination.subLocations.put(
                                    tx, subscriptionKey, checkpointSubscriptionCommand(kahaSub));

                                // Skips the remove from candidates if we rewrote the subscription
                                // in order to prevent duplicate subscription commands on recover.
                                // If another subscription is on the same file and isn't rewritten
                                // than it will remove the file from the set.
                                continue;
                            }
                        }

                        gcCandidateSet.remove(dataFileId);
                    }
                }

                if (LOG.isTraceEnabled()) {
                    LOG.trace("gc candidates after dest:" + entry.getKey() + ", " + gcCandidateSet);
                }
            }

            // check we are not deleting file with ack for in-use journal files
            if (LOG.isTraceEnabled()) {
                LOG.trace("gc candidates: " + gcCandidateSet);
            }
            Iterator<Integer> candidates = gcCandidateSet.iterator();
            while (candidates.hasNext()) {
                Integer candidate = candidates.next();
                Set<Integer> referencedFileIds = metadata.ackMessageFileMap.get(candidate);
                if (referencedFileIds != null) {
                    for (Integer referencedFileId : referencedFileIds) {
                        if (completeFileSet.contains(referencedFileId) && !gcCandidateSet.contains(referencedFileId)) {
                            // active file that is not targeted for deletion is referenced so don't delete
                            candidates.remove();
                            break;
                        }
                    }
                    if (gcCandidateSet.contains(candidate)) {
                        metadata.ackMessageFileMap.remove(candidate);
                    } else {
                        if (LOG.isTraceEnabled()) {
                            LOG.trace("not removing data file: " + candidate
                                    + " as contained ack(s) refer to referenced file: " + referencedFileIds);
                        }
                    }
                }
            }

            if (!gcCandidateSet.isEmpty()) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Cleanup removing the data files: " + gcCandidateSet);
                }
                journal.removeDataFiles(gcCandidateSet);
            }
        }

        LOG.debug("Checkpoint done.");
    }

    // No-op completion callback: lets journal writes skip the disk sync path.
    final Runnable nullCompletionCallback = new Runnable() {
        @Override
        public void run() {
        }
    };

    /**
     * Re-journals the producer sequence-id audit if it changed (or is absent),
     * waiting for the write to land; otherwise returns the existing location.
     */
    private Location checkpointProducerAudit() throws IOException {
        if (metadata.producerSequenceIdTracker == null || metadata.producerSequenceIdTracker.modified()) {
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            ObjectOutputStream oout = new ObjectOutputStream(baos);
            oout.writeObject(metadata.producerSequenceIdTracker);
            oout.flush();
            oout.close();
            // using completion callback allows a disk sync to be avoided when enableJournalDiskSyncs = false
            Location location = store(new KahaProducerAuditCommand().setAudit(new Buffer(baos.toByteArray())), nullCompletionCallback);
            try {
                location.getLatch().await();
            } catch (InterruptedException e) {
                throw new InterruptedIOException(e.toString());
            }
            return location;
        }
        return metadata.producerSequenceIdTrackerLocation;
    }

    /**
     * Journals the current ack-to-message file reference map and waits for the
     * write to complete, returning its location for the metadata record.
     */
    private Location checkpointAckMessageFileMap() throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        ObjectOutputStream oout = new ObjectOutputStream(baos);
        oout.writeObject(metadata.ackMessageFileMap);
        oout.flush();
        oout.close();
        // using completion callback allows a disk sync to be avoided when enableJournalDiskSyncs = false
        Location location = store(new KahaAckMessageFileMapCommand().setAckMessageFileMap(new Buffer(baos.toByteArray())), nullCompletionCallback);
        try {
            location.getLatch().await();
        } catch (InterruptedException e) {
            throw new InterruptedIOException(e.toString());
        }
        return location;
    }

    /**
     * Rewrites a durable-subscription command to the head of the journal so an
     * old file holding it can be GC'd; returns the new location. Note this
     * writes to the journal directly, without an index update.
     */
    private Location checkpointSubscriptionCommand(KahaSubscriptionCommand subscription) throws IOException {

        ByteSequence sequence = toByteSequence(subscription);
        Location location = journal.write(sequence, nullCompletionCallback);

        try {
            location.getLatch().await();
        } catch (InterruptedException e) {
            throw new InterruptedIOException(e.toString());
        }
        return location;
    }

    /** @return the live, mutable set of journal file ids excluded from GC due to replication. */
    public HashSet<Integer> getJournalFilesBeingReplicated() {
        return journalFilesBeingReplicated;
    }
    // /////////////////////////////////////////////////////////////////

    // In-memory cache of loaded per-destination index sets, keyed by key(KahaDestination).
    protected final HashMap<String, StoredDestination> storedDestinations = new HashMap<String, StoredDestination>();

    // Value type of the order index: a message id paired with its journal Location.
    static class MessageKeys {
        final String messageId;
        final Location location;

        public MessageKeys(String messageId, Location location) {
            this.messageId=messageId;
            this.location=location;
        }

        @Override
        public String toString() {
            return "["+messageId+","+location+"]";
        }
    }

    // Marshals MessageKeys as UTF message id followed by the Location.
    static protected class MessageKeysMarshaller extends VariableMarshaller<MessageKeys> {
        static final MessageKeysMarshaller INSTANCE = new MessageKeysMarshaller();

        @Override
        public MessageKeys readPayload(DataInput dataIn) throws IOException {
            return new MessageKeys(dataIn.readUTF(), LocationMarshaller.INSTANCE.readPayload(dataIn));
        }

        @Override
        public void writePayload(MessageKeys object, DataOutput dataOut) throws IOException {
            dataOut.writeUTF(object.messageId);
            LocationMarshaller.INSTANCE.writePayload(object.location, dataOut);
        }
    }

    // Last acknowledged message sequence (and its priority band) for a durable subscription.
    class LastAck {
        long lastAckedSequence;
        byte priority;

        public LastAck(LastAck source) {
            this.lastAckedSequence = source.lastAckedSequence;
            this.priority = source.priority;
        }

        public LastAck() {
            this.priority = MessageOrderIndex.HI;
        }

        public LastAck(long ackLocation) {
            this.lastAckedSequence = ackLocation;
            this.priority = MessageOrderIndex.LO;
        }

        public LastAck(long ackLocation, byte priority) {
            this.lastAckedSequence = ackLocation;
            this.priority = priority;
        }

        @Override
        public String toString() {
            return "[" + lastAckedSequence + ":" + priority + "]";
        }
    }

    // Fixed-size marshaller for LastAck: 8-byte sequence + 1-byte priority = 9 bytes.
    // The priority byte only exists on disk from store version 3 onward.
    protected class LastAckMarshaller implements Marshaller<LastAck> {

        @Override
        public void writePayload(LastAck object, DataOutput dataOut) throws IOException {
            dataOut.writeLong(object.lastAckedSequence);
            dataOut.writeByte(object.priority);
        }

        @Override
        public LastAck readPayload(DataInput dataIn) throws IOException {
            LastAck lastAcked = new LastAck();
            lastAcked.lastAckedSequence = dataIn.readLong();
            if (metadata.version >= 3) {
                // pre-v3 stores had no priority byte; keep the constructor default (HI)
                lastAcked.priority = dataIn.readByte();
            }
            return lastAcked;
        }

        @Override
        public int getFixedSize() {
            return 9;
        }

        @Override
        public LastAck deepCopy(LastAck source) {
            return new LastAck(source);
        }

        @Override
        public boolean isDeepCopySupported() {
            return true;
        }
    }


    // The full set of indexes kept per destination.
    class StoredDestination {

        MessageOrderIndex orderIndex = new MessageOrderIndex();
        BTreeIndex<Location, Long> locationIndex;
        BTreeIndex<String, Long> messageIdIndex;

        // These bits are only set for Topics
        BTreeIndex<String, KahaSubscriptionCommand> subscriptions;
        BTreeIndex<String, LastAck> subscriptionAcks;
        HashMap<String, MessageOrderCursor> subscriptionCursors;
        ListIndex<String, SequenceSet> ackPositions;
        ListIndex<String, Location> subLocations;

        // Transient data used to track which Messages are no longer needed.
        // messageReferences counts, per sequence id, how many subscriptions still need the message.
        final TreeMap<Long, Long> messageReferences = new TreeMap<Long, Long>();
        final HashSet<String> subscriptionCache = new LinkedHashSet<String>();

        public void trackPendingAdd(Long seq) {
            orderIndex.trackPendingAdd(seq);
        }

        public void trackPendingAddComplete(Long seq) {
            orderIndex.trackPendingAddComplete(seq);
        }

        @Override
        public String toString() {
            return "nextSeq:" + orderIndex.nextMessageId + ",lastRet:" + orderIndex.cursor + ",pending:" + orderIndex.pendingAdditions.size();
        }
    }

    /**
     * (De)serializes a StoredDestination's index page ids. readPayload also
     * performs in-place upgrades of pre-current-version layouts: field read
     * order here IS the on-disk format, so the sequence of dataIn reads must
     * never be reordered.
     */
    protected class StoredDestinationMarshaller extends VariableMarshaller<StoredDestination> {

        final MessageKeysMarshaller messageKeysMarshaller = new MessageKeysMarshaller();

        @Override
        public StoredDestination readPayload(final DataInput dataIn) throws IOException {
            final StoredDestination value = new StoredDestination();
            value.orderIndex.defaultPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, dataIn.readLong());
            value.locationIndex = new BTreeIndex<Location, Long>(pageFile, dataIn.readLong());
            value.messageIdIndex = new BTreeIndex<String, Long>(pageFile, dataIn.readLong());

            // A boolean flag marks topic destinations, which carry the subscription indexes.
            if (dataIn.readBoolean()) {
                value.subscriptions = new BTreeIndex<String, KahaSubscriptionCommand>(pageFile, dataIn.readLong());
                value.subscriptionAcks = new BTreeIndex<String, LastAck>(pageFile, dataIn.readLong());
                if (metadata.version >= 4) {
                    value.ackPositions = new ListIndex<String, SequenceSet>(pageFile, dataIn.readLong());
                } else {
                    // upgrade: pre-v4 stored ack positions as BTree<sequence, Set<subKey>>;
                    // convert to ListIndex<subKey, SequenceSet>.
                    pageFile.tx().execute(new Transaction.Closure<IOException>() {
                        @Override
                        public void execute(Transaction tx) throws IOException {
                            LinkedHashMap<String, SequenceSet> temp = new LinkedHashMap<String, SequenceSet>();

                            if (metadata.version >= 3) {
                                // migrate
                                BTreeIndex<Long, HashSet<String>> oldAckPositions =
                                    new BTreeIndex<Long, HashSet<String>>(pageFile, dataIn.readLong());
                                oldAckPositions.setKeyMarshaller(LongMarshaller.INSTANCE);
                                oldAckPositions.setValueMarshaller(HashSetStringMarshaller.INSTANCE);
                                oldAckPositions.load(tx);


                                // Do the initial build of the data in memory before writing into the store
                                // based Ack Positions List to avoid a lot of disk thrashing.
                                Iterator<Entry<Long, HashSet<String>>> iterator = oldAckPositions.iterator(tx);
                                while (iterator.hasNext()) {
                                    Entry<Long, HashSet<String>> entry = iterator.next();

                                    for(String subKey : entry.getValue()) {
                                        SequenceSet pendingAcks = temp.get(subKey);
                                        if (pendingAcks == null) {
                                            pendingAcks = new SequenceSet();
                                            temp.put(subKey, pendingAcks);
                                        }

                                        pendingAcks.add(entry.getKey());
                                    }
                                }
                            }
                            // Now move the pending messages to ack data into the store backed
                            // structure.
                            value.ackPositions = new ListIndex<String, SequenceSet>(pageFile, tx.allocate());
                            value.ackPositions.setKeyMarshaller(StringMarshaller.INSTANCE);
                            value.ackPositions.setValueMarshaller(SequenceSet.Marshaller.INSTANCE);
                            value.ackPositions.load(tx);
                            for(String subscriptionKey : temp.keySet()) {
                                value.ackPositions.put(tx, subscriptionKey, temp.get(subscriptionKey));
                            }

                        }
                    });
                }

                if (metadata.version >= 5) {
                    value.subLocations = new ListIndex<String, Location>(pageFile, dataIn.readLong());
                } else {
                    // upgrade: subLocations list did not exist before v5; allocate an empty one.
                    pageFile.tx().execute(new Transaction.Closure<IOException>() {
                        @Override
                        public void execute(Transaction tx) throws IOException {
                            value.subLocations = new ListIndex<String, Location>(pageFile, tx.allocate());
                            value.subLocations.setKeyMarshaller(StringMarshaller.INSTANCE);
                            value.subLocations.setValueMarshaller(LocationMarshaller.INSTANCE);
                            value.subLocations.load(tx);
                        }
                    });
                }
            }
            if (metadata.version >= 2) {
                value.orderIndex.lowPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, dataIn.readLong());
                value.orderIndex.highPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, dataIn.readLong());
            } else {
                // upgrade: priority indexes did not exist before v2; allocate empty ones.
                pageFile.tx().execute(new Transaction.Closure<IOException>() {
                    @Override
                    public void execute(Transaction tx) throws IOException {
                        value.orderIndex.lowPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, tx.allocate());
                        value.orderIndex.lowPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
                        value.orderIndex.lowPriorityIndex.setValueMarshaller(messageKeysMarshaller);
                        value.orderIndex.lowPriorityIndex.load(tx);

                        value.orderIndex.highPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, tx.allocate());
                        value.orderIndex.highPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
                        value.orderIndex.highPriorityIndex.setValueMarshaller(messageKeysMarshaller);
                        value.orderIndex.highPriorityIndex.load(tx);
                    }
                });
            }

            return value;
        }

        @Override
        public void writePayload(StoredDestination value, DataOutput dataOut) throws IOException {
            // Write order must mirror readPayload's current-version read order exactly.
            dataOut.writeLong(value.orderIndex.defaultPriorityIndex.getPageId());
            dataOut.writeLong(value.locationIndex.getPageId());
            dataOut.writeLong(value.messageIdIndex.getPageId());
            if (value.subscriptions != null) {
                dataOut.writeBoolean(true);
                dataOut.writeLong(value.subscriptions.getPageId());
                dataOut.writeLong(value.subscriptionAcks.getPageId());
                dataOut.writeLong(value.ackPositions.getHeadPageId());
                dataOut.writeLong(value.subLocations.getHeadPageId());
            } else {
                dataOut.writeBoolean(false);
            }
            dataOut.writeLong(value.orderIndex.lowPriorityIndex.getPageId());
            dataOut.writeLong(value.orderIndex.highPriorityIndex.getPageId());
        }
    }

    // Marshals subscription commands using their framed protobuf form.
    static class KahaSubscriptionCommandMarshaller extends VariableMarshaller<KahaSubscriptionCommand> {
        final static KahaSubscriptionCommandMarshaller INSTANCE = new KahaSubscriptionCommandMarshaller();

        @Override
        public KahaSubscriptionCommand readPayload(DataInput dataIn) throws IOException {
            KahaSubscriptionCommand rc = new KahaSubscriptionCommand();
            rc.mergeFramed((InputStream)dataIn);
            return rc;
        }

        @Override
        public void writePayload(KahaSubscriptionCommand object, DataOutput dataOut) throws IOException {
            object.writeFramed((OutputStream)dataOut);
        }
    }

    /**
     * Returns the StoredDestination for the given destination, loading (and
     * creating, if brand new) and caching it on first access.
     */
    protected StoredDestination getStoredDestination(KahaDestination destination, Transaction tx) throws IOException {
        String key = key(destination);
        StoredDestination rc = storedDestinations.get(key);
        if (rc == null) {
            boolean topic = destination.getType() == KahaDestination.DestinationType.TOPIC || destination.getType() == KahaDestination.DestinationType.TEMP_TOPIC;
            rc = loadStoredDestination(tx, key, topic);
            // Cache it. We may want to remove/unload destinations from the
            // cache that are not used for a while
            // to reduce memory usage.
            storedDestinations.put(key, rc);
        }
        return rc;
    }

    /**
     * Like getStoredDestination but never creates a new destination: returns
     * null when the destination is neither cached nor present in the metadata.
     */
    protected StoredDestination getExistingStoredDestination(KahaDestination destination, Transaction tx) throws IOException {
        String key = key(destination);
        StoredDestination rc = storedDestinations.get(key);
        if (rc == null && metadata.destinations.containsKey(tx, key)) {
            rc = getStoredDestination(destination, tx);
        }
        return rc;
    }

    /**
     * Loads (or allocates, for a brand new destination) all indexes for a
     * destination, configures their marshallers, and performs any pending
     * on-disk version upgrades.
     *
     * @param tx index transaction to operate in
     * @param key destination key as produced by key(KahaDestination)
     * @param topic true when the destination carries subscription indexes
     * @return the fully loaded StoredDestination
     * @throws IOException on index access failure
     */
    private StoredDestination loadStoredDestination(Transaction tx, String key, boolean topic) throws IOException {
        // Try to load the existing indexes..
        StoredDestination rc = metadata.destinations.get(tx, key);
        if (rc == null) {
            // Brand new destination.. allocate indexes for it.
            rc = new StoredDestination();
            rc.orderIndex.allocate(tx);
            rc.locationIndex = new BTreeIndex<Location, Long>(pageFile, tx.allocate());
            rc.messageIdIndex = new BTreeIndex<String, Long>(pageFile, tx.allocate());

            if (topic) {
                rc.subscriptions = new BTreeIndex<String, KahaSubscriptionCommand>(pageFile, tx.allocate());
                rc.subscriptionAcks = new BTreeIndex<String, LastAck>(pageFile, tx.allocate());
                rc.ackPositions = new ListIndex<String, SequenceSet>(pageFile, tx.allocate());
                rc.subLocations = new ListIndex<String, Location>(pageFile, tx.allocate());
            }
            metadata.destinations.put(tx, key, rc);
        }

        // Configure the marshalers and load.
        rc.orderIndex.load(tx);

        // Figure out the next key using the last entry in the destination.
        rc.orderIndex.configureLast(tx);

        rc.locationIndex.setKeyMarshaller(new LocationSizeMarshaller());
        rc.locationIndex.setValueMarshaller(LongMarshaller.INSTANCE);
        rc.locationIndex.load(tx);

        rc.messageIdIndex.setKeyMarshaller(StringMarshaller.INSTANCE);
        rc.messageIdIndex.setValueMarshaller(LongMarshaller.INSTANCE);
        rc.messageIdIndex.load(tx);

        //go through an upgrade old index if older than version 6
        if (metadata.version < 6) {
            // re-put each entry so it is rewritten with the v6 (size-bearing) Location format
            for (Iterator<Entry<Location, Long>> iterator = rc.locationIndex.iterator(tx); iterator.hasNext(); ) {
                Entry<Location, Long> entry = iterator.next();
                // modify so it is upgraded
                rc.locationIndex.put(tx, entry.getKey(), entry.getValue());
            }
        }

        // If it was a topic...
        if (topic) {

            rc.subscriptions.setKeyMarshaller(StringMarshaller.INSTANCE);
            rc.subscriptions.setValueMarshaller(KahaSubscriptionCommandMarshaller.INSTANCE);
            rc.subscriptions.load(tx);

            rc.subscriptionAcks.setKeyMarshaller(StringMarshaller.INSTANCE);
            rc.subscriptionAcks.setValueMarshaller(new LastAckMarshaller());
            rc.subscriptionAcks.load(tx);

            rc.ackPositions.setKeyMarshaller(StringMarshaller.INSTANCE);
            rc.ackPositions.setValueMarshaller(SequenceSet.Marshaller.INSTANCE);
            rc.ackPositions.load(tx);

            rc.subLocations.setKeyMarshaller(StringMarshaller.INSTANCE);
            rc.subLocations.setValueMarshaller(LocationMarshaller.INSTANCE);
            rc.subLocations.load(tx);

            rc.subscriptionCursors = new HashMap<String, MessageOrderCursor>();

            if (metadata.version < 3) {

                // on upgrade need to fill ackLocation with available messages past last ack
                for (Iterator<Entry<String, LastAck>> iterator = rc.subscriptionAcks.iterator(tx); iterator.hasNext(); ) {
                    Entry<String, LastAck> entry = iterator.next();
                    for (Iterator<Entry<Long, MessageKeys>> orderIterator =
                            rc.orderIndex.iterator(tx, new MessageOrderCursor(entry.getValue().lastAckedSequence)); orderIterator.hasNext(); ) {
                        Long sequence = orderIterator.next().getKey();
                        addAckLocation(tx, rc, sequence, entry.getKey());
                    }
                    // modify so it is upgraded
                    rc.subscriptionAcks.put(tx, entry.getKey(), entry.getValue());
                }
            }

            // Configure the message references index
            Iterator<Entry<String, SequenceSet>> subscriptions = rc.ackPositions.iterator(tx);
            while (subscriptions.hasNext()) {
                Entry<String, SequenceSet> subscription = subscriptions.next();
                SequenceSet pendingAcks = subscription.getValue();
                if (pendingAcks != null && !pendingAcks.isEmpty()) {
                    Long lastPendingAck = pendingAcks.getTail().getLast();
                    for (Long sequenceId : pendingAcks) {
                        Long current = rc.messageReferences.get(sequenceId);
                        if (current == null) {
                            // NOTE(review): deprecated boxing constructor; the rest of this
                            // method uses Long.valueOf(0L) for the same purpose.
                            current = new Long(0);
                        }

                        // We always add a trailing empty entry for the next position to start from
                        // so we need to ensure we don't count that as a message reference on reload.
                        if (!sequenceId.equals(lastPendingAck)) {
                            current = current.longValue() + 1;
                        } else {
                            current = Long.valueOf(0L);
                        }

                        rc.messageReferences.put(sequenceId, current);
                    }
                }
            }

            // Configure the subscription cache
            for (Iterator<Entry<String, LastAck>> iterator = rc.subscriptionAcks.iterator(tx); iterator.hasNext(); ) {
                Entry<String, LastAck> entry = iterator.next();
                rc.subscriptionCache.add(entry.getKey());
            }

            if (rc.orderIndex.nextMessageId == 0) {
                // check for existing durable sub all acked out - pull next seq from acks as messages are gone
                if (!rc.subscriptionAcks.isEmpty(tx)) {
                    for (Iterator<Entry<String, LastAck>> iterator = rc.subscriptionAcks.iterator(tx); iterator.hasNext();) {
                        Entry<String, LastAck> entry = iterator.next();
                        rc.orderIndex.nextMessageId =
                            Math.max(rc.orderIndex.nextMessageId, entry.getValue().lastAckedSequence +1);
                    }
                }
            } else {
                // update based on ackPositions for unmatched, last entry is always the next
                if (!rc.messageReferences.isEmpty()) {
                    Long nextMessageId = (Long) rc.messageReferences.keySet().toArray()[rc.messageReferences.size() - 1];
                    rc.orderIndex.nextMessageId =
                        Math.max(rc.orderIndex.nextMessageId, nextMessageId);
                }
            }
        }

        if (metadata.version < VERSION) {
            // store again after upgrade
            metadata.destinations.put(tx, key, rc);
        }
        return rc;
    }

    /**
     * Clear the counter for the destination, if one exists.
     *
     * @param kahaDestination
     */
    protected void clearStoreStats(KahaDestination kahaDestination) {
        MessageStoreStatistics storeStats = getStoreStats(key(kahaDestination));
        if (storeStats != null) {
            storeStats.reset();
        }
    }

    /**
     * Update MessageStoreStatistics: bump the message count and, when a
     * positive size is supplied, the cumulative message size.
     *
     * @param kahaDestination
     * @param size
     */
    protected void incrementAndAddSizeToStoreStat(KahaDestination kahaDestination, long size) {
        incrementAndAddSizeToStoreStat(key(kahaDestination), size);
    }

    protected void incrementAndAddSizeToStoreStat(String kahaDestKey, long size) {
        MessageStoreStatistics storeStats = getStoreStats(kahaDestKey);
        if (storeStats != null) {
            storeStats.getMessageCount().increment();
            if (size > 0) {
                storeStats.getMessageSize().addSize(size);
            }
        }
    }

    protected void decrementAndSubSizeToStoreStat(KahaDestination kahaDestination, long size) {
        decrementAndSubSizeToStoreStat(key(kahaDestination), size);
    }

    protected void decrementAndSubSizeToStoreStat(String kahaDestKey, long size) {
        MessageStoreStatistics storeStats = getStoreStats(kahaDestKey);
        if (storeStats != null) {
            storeStats.getMessageCount().decrement();
            if (size > 0) {
                storeStats.getMessageSize().addSize(-size);
            }
        }
    }

    /**
     * This is a map to cache DestinationStatistics for a specific
     * KahaDestination key
     */
    protected final ConcurrentMap<String, MessageStore> storeCache =
        new ConcurrentHashMap<String, MessageStore>();

    /**
     * Locate the message store statistics for the given destination key,
     * or null when the destination is not cached or lookup fails.
     *
     * @param kahaDestKey destination key as produced by key(KahaDestination)
     * @return the statistics, or null if unavailable
     */
    protected MessageStoreStatistics getStoreStats(String kahaDestKey) {
        MessageStoreStatistics storeStats = null;
        try {
            MessageStore messageStore = storeCache.get(kahaDestKey);
            if (messageStore != null) {
                storeStats = messageStore.getMessageStoreStatistics();
            }
        } catch (Exception e1) {
            // best effort: stats lookup failure must not break the caller
            LOG.error("Getting size counter of destination failed", e1);
        }

        return storeStats;
    }

    /**
     * Determine whether this Destination matches the DestinationType
     *
     * @param destination
     * @param type
     * @return true when the region destination's kind matches the Kaha type
     */
    protected boolean matchType(Destination destination,
            KahaDestination.DestinationType type) {
        if (destination instanceof Topic
                && type.equals(KahaDestination.DestinationType.TOPIC)) {
            return true;
        } else if (destination instanceof Queue
                && type.equals(KahaDestination.DestinationType.QUEUE)) {
            return true;
        }
        return false;
    }

    // Location marshaller that also persists the entry size; the size field is
    // only present on disk from store version 6 onward (see readPayload).
    class LocationSizeMarshaller implements Marshaller<Location> {

        public LocationSizeMarshaller() {

        }

        @Override
        public Location readPayload(DataInput dataIn) throws IOException {
            Location rc = new Location();
            rc.setDataFileId(dataIn.readInt());
            rc.setOffset(dataIn.readInt());
            if (metadata.version >= 6) {
                rc.setSize(dataIn.readInt());
            }
            return rc;
        }

        @Override
        public void writePayload(Location object, DataOutput dataOut)
                throws IOException {
            dataOut.writeInt(object.getDataFileId());
            dataOut.writeInt(object.getOffset());
            dataOut.writeInt(object.getSize());
        }

        @Override
        public int getFixedSize() {
            // 3 ints: data file id, offset, size
            return 12;
        }

        @Override
        public Location deepCopy(Location source) {
            return new Location(source);
        }

        @Override
        public boolean isDeepCopySupported() {
            return true;
        }
    }

    // Records that the given subscription still needs messageSequence ack'd and
    // bumps the message's reference count.
    private void addAckLocation(Transaction tx, StoredDestination sd, Long messageSequence, String subscriptionKey) throws IOException {
        SequenceSet sequences = sd.ackPositions.get(tx, subscriptionKey);
        if (sequences == null) {
            sequences = new SequenceSet();
            sequences.add(messageSequence);
            sd.ackPositions.add(tx, subscriptionKey, sequences);
        } else {
            sequences.add(messageSequence);
            sd.ackPositions.put(tx, subscriptionKey, sequences);
        }

        Long count = sd.messageReferences.get(messageSequence);
        if (count == null) {
            count = Long.valueOf(0L);
        }
        count = count.longValue() + 1;
        sd.messageReferences.put(messageSequence, count);
    }

    // new sub is interested in potentially all existing messages
    private void addAckLocationForRetroactiveSub(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
        // Union of every other subscription's outstanding sequences becomes this sub's pending set.
        SequenceSet allOutstanding = new SequenceSet();
        Iterator<Map.Entry<String, SequenceSet>> iterator = sd.ackPositions.iterator(tx);
        while (iterator.hasNext()) {
            SequenceSet set = iterator.next().getValue();
            for (Long entry : set) {
                allOutstanding.add(entry);
            }
        }
        sd.ackPositions.put(tx, subscriptionKey, allOutstanding);

        for (Long ackPosition : allOutstanding) {
            Long count = sd.messageReferences.get(ackPosition);

            // There might not be a reference if the ackLocation was the last
            // one which is a placeholder for the next incoming message and
            // no value was added to the message references table.
            if (count != null) {
                count = count.longValue() + 1;
                sd.messageReferences.put(ackPosition, count);
            }
        }
    }

    // on a new message add, all existing subs are interested in this message
    private void addAckLocationForNewMessage(Transaction tx, StoredDestination sd, Long messageSequence) throws IOException {
        for(String subscriptionKey : sd.subscriptionCache) {
            SequenceSet sequences = sd.ackPositions.get(tx, subscriptionKey);
            if (sequences == null) {
                sequences = new SequenceSet();
                sequences.add(new Sequence(messageSequence, messageSequence + 1));
                sd.ackPositions.add(tx, subscriptionKey, sequences);
            } else {
                sequences.add(new Sequence(messageSequence, messageSequence + 1));
                sd.ackPositions.put(tx, subscriptionKey, sequences);
            }

            Long count = sd.messageReferences.get(messageSequence);
            if (count == null) {
                count = Long.valueOf(0L);
            }
            count = count.longValue() + 1;
            sd.messageReferences.put(messageSequence, count);
            // trailing zero-count placeholder marks the next expected position
            sd.messageReferences.put(messageSequence + 1, Long.valueOf(0L));
        }
    }

    // Drops a subscription's entire pending-ack set and deletes any messages
    // whose reference count thereby falls to zero.
    private void removeAckLocationsForSub(KahaSubscriptionCommand command,
            Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
        if (!sd.ackPositions.isEmpty(tx)) {
            SequenceSet sequences = sd.ackPositions.remove(tx, subscriptionKey);
            if (sequences == null || sequences.isEmpty()) {
                return;
            }

            ArrayList<Long> unreferenced = new ArrayList<Long>();

            for(Long sequenceId : sequences) {
                Long references = sd.messageReferences.get(sequenceId);
                if (references != null) {
                    references = references.longValue() - 1;

                    if (references.longValue() > 0) {
                        sd.messageReferences.put(sequenceId, references);
                    } else {
                        sd.messageReferences.remove(sequenceId);
                        unreferenced.add(sequenceId);
                    }
                }
            }

            for(Long sequenceId : unreferenced) {
                // Find all the entries that need to get deleted.
                ArrayList<Entry<Long, MessageKeys>> deletes = new ArrayList<Entry<Long, MessageKeys>>();
                sd.orderIndex.getDeleteList(tx, deletes, sequenceId);

                // Do the actual deletes.
                for (Entry<Long, MessageKeys> entry : deletes) {
                    sd.locationIndex.remove(tx, entry.getValue().location);
                    sd.messageIdIndex.remove(tx, entry.getValue().messageId);
                    sd.orderIndex.remove(tx, entry.getKey());
                    decrementAndSubSizeToStoreStat(command.getDestination(), entry.getValue().location.getSize());
                }
            }
        }
    }

    /**
     * Removes one acked message sequence from a subscription's pending set and,
     * when no other subscription still references the message, deletes it from
     * all indexes.
     *
     * @param tx
     * @param sd
     * @param subscriptionKey
     * @param messageSequence
     * @throws IOException
     */
    private void removeAckLocation(KahaRemoveMessageCommand command,
            Transaction tx, StoredDestination sd, String subscriptionKey,
            Long messageSequence) throws IOException {
        // Remove the sub from the previous location set..
        if (messageSequence != null) {
            SequenceSet range = sd.ackPositions.get(tx, subscriptionKey);
            if (range != null && !range.isEmpty()) {
                range.remove(messageSequence);
                if (!range.isEmpty()) {
                    sd.ackPositions.put(tx, subscriptionKey, range);
                } else {
                    sd.ackPositions.remove(tx, subscriptionKey);
                }

                // Check if the message is reference by any other subscription.
                Long count = sd.messageReferences.get(messageSequence);
                if (count != null) {
                    long references = count.longValue() - 1;
                    if (references > 0) {
                        sd.messageReferences.put(messageSequence, Long.valueOf(references));
                        return;
                    } else {
                        sd.messageReferences.remove(messageSequence);
                    }
                }

                // Find all the entries that need to get deleted.
                ArrayList<Entry<Long, MessageKeys>> deletes = new ArrayList<Entry<Long, MessageKeys>>();
                sd.orderIndex.getDeleteList(tx, deletes, messageSequence);

                // Do the actual deletes.
                for (Entry<Long, MessageKeys> entry : deletes) {
                    sd.locationIndex.remove(tx, entry.getValue().location);
                    sd.messageIdIndex.remove(tx, entry.getValue().messageId);
                    sd.orderIndex.remove(tx, entry.getKey());
                    decrementAndSubSizeToStoreStat(command.getDestination(), entry.getValue().location.getSize());
                }
            }
        }
    }

    public LastAck getLastAck(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
        return sd.subscriptionAcks.get(tx, subscriptionKey);
    }

    // Number of messages still pending for a subscription, excluding the
    // trailing next-position marker.
    public long getStoredMessageCount(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
        SequenceSet messageSequences = sd.ackPositions.get(tx, subscriptionKey);
        if (messageSequences != null) {
            long result = messageSequences.rangeSize();
            // if there's anything in the range the last value is always the nextMessage marker, so remove 1.
            return result > 0 ? result - 1 : 0;
        }

        return 0;
    }

    // Destination key format: "<type-number>:<name>".
    protected String key(KahaDestination destination) {
        return destination.getType().getNumber() + ":" + destination.getName();
    }

    // /////////////////////////////////////////////////////////////////
    // Transaction related implementation methods.
    // /////////////////////////////////////////////////////////////////
    @SuppressWarnings("rawtypes")
    private final LinkedHashMap<TransactionId, List<Operation>> inflightTransactions = new LinkedHashMap<TransactionId, List<Operation>>();
    @SuppressWarnings("rawtypes")
    protected final LinkedHashMap<TransactionId, List<Operation>> preparedTransactions = new LinkedHashMap<TransactionId, List<Operation>>();
    // Producer keys of messages with prepared (pending) acks; guarded by indexLock.
    protected final Set<String> ackedAndPrepared = new HashSet<String>();
    // Producer keys of acks rolled back during recovery; guarded by indexLock.
    protected final Set<String> rolledBackAcks = new HashSet<String>();

    // messages that have prepared (pending) acks cannot be re-dispatched unless the outcome is rollback,
    // till then they are skipped by the store.
    // 'at most once' XA guarantee
    public void trackRecoveredAcks(ArrayList<MessageAck> acks) {
        this.indexLock.writeLock().lock();
        try {
            for (MessageAck ack : acks) {
                ackedAndPrepared.add(ack.getLastMessageId().toProducerKey());
            }
        } finally {
            this.indexLock.writeLock().unlock();
        }
    }

    /**
     * Drops recovered prepared acks once their transaction outcome is known;
     * on rollback the ids are remembered so the messages can be re-dispatched.
     *
     * @param acks the recovered acks to forget (null-safe)
     * @param rollback true when the outcome was rollback
     */
    public void forgetRecoveredAcks(ArrayList<MessageAck> acks, boolean rollback) throws IOException {
        if (acks != null) {
            this.indexLock.writeLock().lock();
            try {
                for (MessageAck ack : acks) {
                    final String id = ack.getLastMessageId().toProducerKey();
                    ackedAndPrepared.remove(id);
                    if (rollback) {
                        rolledBackAcks.add(id);
                    }
                }
            } finally {
                this.indexLock.writeLock().unlock();
            }
        }
    }

    // Gets (creating on first use) the operation list for an in-flight transaction.
    @SuppressWarnings("rawtypes")
    private List<Operation> getInflightTx(KahaTransactionInfo info) {
        TransactionId key = TransactionIdConversion.convert(info);
        List<Operation> tx;
        synchronized (inflightTransactions) {
            tx = inflightTransactions.get(key);
            if (tx == null) {
                tx = Collections.synchronizedList(new ArrayList<Operation>());
                inflightTransactions.put(key, tx);
            }
        }
        return tx;
    }

    @SuppressWarnings("unused")
    private TransactionId key(KahaTransactionInfo transactionInfo) {
        return TransactionIdConversion.convert(transactionInfo);
    }

    // A journaled command plus its location, replayed against the index on commit.
    abstract class Operation <T extends JournalCommand<T>> {
        final T command;
        final Location location;

        public Operation(T command, Location location) {
            this.command = command;
            this.location = location;
        }

        public Location getLocation() {
            return location;
        }

        public T getCommand() {
            return command;
        }

        abstract public void execute(Transaction tx) throws IOException;
    }

    // Deferred message add; notifies the IndexAware callback of the assigned
    // sequence while the index lock is still held.
    class AddOperation extends Operation<KahaAddMessageCommand> {
        final IndexAware runWithIndexLock;
        public AddOperation(KahaAddMessageCommand command, Location location, IndexAware runWithIndexLock) {
            super(command, location);
            this.runWithIndexLock = runWithIndexLock;
        }

        @Override
        public void execute(Transaction tx) throws IOException {
            long seq = updateIndex(tx, command, location);
            if (runWithIndexLock != null) {
                runWithIndexLock.sequenceAssignedWithIndexLocked(seq);
            }
        }

    }

    // Deferred message remove (ack) applied to the index on commit.
    class RemoveOperation extends Operation<KahaRemoveMessageCommand> {

        public RemoveOperation(KahaRemoveMessageCommand command, Location location) {
            super(command, location);
        }

        @Override
        public void execute(Transaction tx) throws IOException {
            updateIndex(tx, command, location);
        }
    }

    // /////////////////////////////////////////////////////////////////
    // Initialization related implementation methods.
2654 // ///////////////////////////////////////////////////////////////// 2655 2656 private PageFile createPageFile() throws IOException { 2657 if( indexDirectory == null ) { 2658 indexDirectory = directory; 2659 } 2660 IOHelper.mkdirs(indexDirectory); 2661 PageFile index = new PageFile(indexDirectory, "db"); 2662 index.setEnableWriteThread(isEnableIndexWriteAsync()); 2663 index.setWriteBatchSize(getIndexWriteBatchSize()); 2664 index.setPageCacheSize(indexCacheSize); 2665 index.setUseLFRUEviction(isUseIndexLFRUEviction()); 2666 index.setLFUEvictionFactor(getIndexLFUEvictionFactor()); 2667 index.setEnableDiskSyncs(isEnableIndexDiskSyncs()); 2668 index.setEnableRecoveryFile(isEnableIndexRecoveryFile()); 2669 index.setEnablePageCaching(isEnableIndexPageCaching()); 2670 return index; 2671 } 2672 2673 private Journal createJournal() throws IOException { 2674 Journal manager = new Journal(); 2675 manager.setDirectory(directory); 2676 manager.setMaxFileLength(getJournalMaxFileLength()); 2677 manager.setCheckForCorruptionOnStartup(checkForCorruptJournalFiles); 2678 manager.setChecksum(checksumJournalFiles || checkForCorruptJournalFiles); 2679 manager.setWriteBatchSize(getJournalMaxWriteBatchSize()); 2680 manager.setArchiveDataLogs(isArchiveDataLogs()); 2681 manager.setSizeAccumulator(journalSize); 2682 manager.setEnableAsyncDiskSync(isEnableJournalDiskSyncs()); 2683 manager.setPreallocationScope(Journal.PreallocationScope.valueOf(preallocationScope.trim().toUpperCase())); 2684 manager.setPreallocationStrategy( 2685 Journal.PreallocationStrategy.valueOf(preallocationStrategy.trim().toUpperCase())); 2686 if (getDirectoryArchive() != null) { 2687 IOHelper.mkdirs(getDirectoryArchive()); 2688 manager.setDirectoryArchive(getDirectoryArchive()); 2689 } 2690 return manager; 2691 } 2692 2693 private Metadata createMetadata() { 2694 Metadata md = new Metadata(); 2695 md.producerSequenceIdTracker.setAuditDepth(getFailoverProducersAuditDepth()); 2696 
md.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(getMaxFailoverProducersToTrack()); 2697 return md; 2698 } 2699 2700 public int getJournalMaxWriteBatchSize() { 2701 return journalMaxWriteBatchSize; 2702 } 2703 2704 public void setJournalMaxWriteBatchSize(int journalMaxWriteBatchSize) { 2705 this.journalMaxWriteBatchSize = journalMaxWriteBatchSize; 2706 } 2707 2708 public File getDirectory() { 2709 return directory; 2710 } 2711 2712 public void setDirectory(File directory) { 2713 this.directory = directory; 2714 } 2715 2716 public boolean isDeleteAllMessages() { 2717 return deleteAllMessages; 2718 } 2719 2720 public void setDeleteAllMessages(boolean deleteAllMessages) { 2721 this.deleteAllMessages = deleteAllMessages; 2722 } 2723 2724 public void setIndexWriteBatchSize(int setIndexWriteBatchSize) { 2725 this.setIndexWriteBatchSize = setIndexWriteBatchSize; 2726 } 2727 2728 public int getIndexWriteBatchSize() { 2729 return setIndexWriteBatchSize; 2730 } 2731 2732 public void setEnableIndexWriteAsync(boolean enableIndexWriteAsync) { 2733 this.enableIndexWriteAsync = enableIndexWriteAsync; 2734 } 2735 2736 boolean isEnableIndexWriteAsync() { 2737 return enableIndexWriteAsync; 2738 } 2739 2740 public boolean isEnableJournalDiskSyncs() { 2741 return enableJournalDiskSyncs; 2742 } 2743 2744 public void setEnableJournalDiskSyncs(boolean syncWrites) { 2745 this.enableJournalDiskSyncs = syncWrites; 2746 } 2747 2748 public long getCheckpointInterval() { 2749 return checkpointInterval; 2750 } 2751 2752 public void setCheckpointInterval(long checkpointInterval) { 2753 this.checkpointInterval = checkpointInterval; 2754 } 2755 2756 public long getCleanupInterval() { 2757 return cleanupInterval; 2758 } 2759 2760 public void setCleanupInterval(long cleanupInterval) { 2761 this.cleanupInterval = cleanupInterval; 2762 } 2763 2764 public void setJournalMaxFileLength(int journalMaxFileLength) { 2765 this.journalMaxFileLength = journalMaxFileLength; 2766 } 2767 2768 
    public int getJournalMaxFileLength() {
        return journalMaxFileLength;
    }

    // Failover producer audit settings delegate to the metadata's
    // producerSequenceIdTracker.
    public void setMaxFailoverProducersToTrack(int maxFailoverProducersToTrack) {
        this.metadata.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(maxFailoverProducersToTrack);
    }

    public int getMaxFailoverProducersToTrack() {
        return this.metadata.producerSequenceIdTracker.getMaximumNumberOfProducersToTrack();
    }

    public void setFailoverProducersAuditDepth(int failoverProducersAuditDepth) {
        this.metadata.producerSequenceIdTracker.setAuditDepth(failoverProducersAuditDepth);
    }

    public int getFailoverProducersAuditDepth() {
        return this.metadata.producerSequenceIdTracker.getAuditDepth();
    }

    // Lazily created on first use. NOTE(review): no synchronization on the
    // null-check — presumably only called while lifecycle guarantees
    // single-threaded access; confirm.
    public PageFile getPageFile() throws IOException {
        if (pageFile == null) {
            pageFile = createPageFile();
        }
        return pageFile;
    }

    // Lazily created on first use (same caveat as getPageFile()).
    public Journal getJournal() throws IOException {
        if (journal == null) {
            journal = createJournal();
        }
        return journal;
    }

    public boolean isFailIfDatabaseIsLocked() {
        return failIfDatabaseIsLocked;
    }

    public void setFailIfDatabaseIsLocked(boolean failIfDatabaseIsLocked) {
        this.failIfDatabaseIsLocked = failIfDatabaseIsLocked;
    }

    public boolean isIgnoreMissingJournalfiles() {
        return ignoreMissingJournalfiles;
    }

    public void setIgnoreMissingJournalfiles(boolean ignoreMissingJournalfiles) {
        this.ignoreMissingJournalfiles = ignoreMissingJournalfiles;
    }

    public int getIndexCacheSize() {
        return indexCacheSize;
    }

    public void setIndexCacheSize(int indexCacheSize) {
        this.indexCacheSize = indexCacheSize;
    }

    public boolean isCheckForCorruptJournalFiles() {
        return checkForCorruptJournalFiles;
    }

    public void setCheckForCorruptJournalFiles(boolean checkForCorruptJournalFiles) {
        this.checkForCorruptJournalFiles = checkForCorruptJournalFiles;
    }

    public boolean isChecksumJournalFiles() {
        return checksumJournalFiles;
    }

    public void setChecksumJournalFiles(boolean checksumJournalFiles) {
        this.checksumJournalFiles = checksumJournalFiles;
    }

    @Override
    public void setBrokerService(BrokerService brokerService) {
        this.brokerService = brokerService;
    }

    /**
     * @return the archiveDataLogs
     */
    public boolean isArchiveDataLogs() {
        return this.archiveDataLogs;
    }

    /**
     * @param archiveDataLogs the archiveDataLogs to set
     */
    public void setArchiveDataLogs(boolean archiveDataLogs) {
        this.archiveDataLogs = archiveDataLogs;
    }

    /**
     * @return the directoryArchive
     */
    public File getDirectoryArchive() {
        return this.directoryArchive;
    }

    /**
     * @param directoryArchive the directoryArchive to set
     */
    public void setDirectoryArchive(File directoryArchive) {
        this.directoryArchive = directoryArchive;
    }

    public boolean isArchiveCorruptedIndex() {
        return archiveCorruptedIndex;
    }

    public void setArchiveCorruptedIndex(boolean archiveCorruptedIndex) {
        this.archiveCorruptedIndex = archiveCorruptedIndex;
    }

    public float getIndexLFUEvictionFactor() {
        return indexLFUEvictionFactor;
    }

    public void setIndexLFUEvictionFactor(float indexLFUEvictionFactor) {
        this.indexLFUEvictionFactor = indexLFUEvictionFactor;
    }

    public boolean isUseIndexLFRUEviction() {
        return useIndexLFRUEviction;
    }

    public void setUseIndexLFRUEviction(boolean useIndexLFRUEviction) {
        this.useIndexLFRUEviction = useIndexLFRUEviction;
    }

    public void setEnableIndexDiskSyncs(boolean enableIndexDiskSyncs) {
        this.enableIndexDiskSyncs = enableIndexDiskSyncs;
    }

    public void setEnableIndexRecoveryFile(boolean enableIndexRecoveryFile) {
        this.enableIndexRecoveryFile = enableIndexRecoveryFile;
    }

    public void setEnableIndexPageCaching(boolean enableIndexPageCaching) {
        this.enableIndexPageCaching = enableIndexPageCaching;
    }

    public boolean isEnableIndexDiskSyncs() {
        return enableIndexDiskSyncs;
    }

    public boolean isEnableIndexRecoveryFile() {
        return enableIndexRecoveryFile;
    }

    public boolean isEnableIndexPageCaching() {
        return enableIndexPageCaching;
    }

    // /////////////////////////////////////////////////////////////////
    // Internal conversion methods.
    // /////////////////////////////////////////////////////////////////

    /**
     * Tracks the next read position for each of the three priority bands
     * (default, low, high). A position of 0 means "not yet positioned".
     */
    class MessageOrderCursor {
        long defaultCursorPosition;
        long lowPriorityCursorPosition;
        long highPriorityCursorPosition;

        MessageOrderCursor() {
        }

        MessageOrderCursor(long position) {
            this.defaultCursorPosition = position;
            this.lowPriorityCursorPosition = position;
            this.highPriorityCursorPosition = position;
        }

        MessageOrderCursor(MessageOrderCursor other) {
            this.defaultCursorPosition = other.defaultCursorPosition;
            this.lowPriorityCursorPosition = other.lowPriorityCursorPosition;
            this.highPriorityCursorPosition = other.highPriorityCursorPosition;
        }

        MessageOrderCursor copy() {
            return new MessageOrderCursor(this);
        }

        void reset() {
            this.defaultCursorPosition = 0;
            this.highPriorityCursorPosition = 0;
            this.lowPriorityCursorPosition = 0;
        }

        // Only advances positions that have been initialised (non-zero).
        void increment() {
            if (defaultCursorPosition != 0) {
                defaultCursorPosition++;
            }
            if (highPriorityCursorPosition != 0) {
                highPriorityCursorPosition++;
            }
            if (lowPriorityCursorPosition != 0) {
                lowPriorityCursorPosition++;
            }
        }

        @Override
        public String toString() {
            return "MessageOrderCursor:[def:" + defaultCursorPosition
                + ", low:" + lowPriorityCursorPosition
                + ", high:" + highPriorityCursorPosition + "]";
        }

        // Copies the positions from another cursor into this one.
        public void sync(MessageOrderCursor other) {
            this.defaultCursorPosition = other.defaultCursorPosition;
            this.lowPriorityCursorPosition = other.lowPriorityCursorPosition;
            this.highPriorityCursorPosition = other.highPriorityCursorPosition;
        }
    }

    /**
     * Message index split across three BTrees keyed by store sequence, one per
     * priority band: high (> DEFAULT_PRIORITY), default (== DEFAULT_PRIORITY)
     * and low (see put()). The low/high indexes only exist when
     * metadata.version >= 2 (see allocate()), so older stores have them null.
     */
    class MessageOrderIndex {
        static final byte HI = 9;
        static final byte LO = 0;
        static final byte DEF = 4;

        long nextMessageId;
        BTreeIndex<Long, MessageKeys> defaultPriorityIndex;
        BTreeIndex<Long, MessageKeys> lowPriorityIndex;
        BTreeIndex<Long, MessageKeys> highPriorityIndex;
        final MessageOrderCursor cursor = new MessageOrderCursor();
        // Last key handed out by the iterator for each band; consumed by
        // stoppedIterating() to advance the cursor past what was read.
        Long lastDefaultKey;
        Long lastHighKey;
        Long lastLowKey;
        // Band of the most recent get() hit (HI/DEF/LO).
        byte lastGetPriority;
        // Sequences of adds that are in flight but not yet indexed; iteration
        // never reads past the smallest pending one (see minPendingAdd()).
        final List<Long> pendingAdditions = new LinkedList<Long>();

        // Removes the key from whichever band contains it; null-safe for
        // stores that pre-date the priority indexes.
        MessageKeys remove(Transaction tx, Long key) throws IOException {
            MessageKeys result = defaultPriorityIndex.remove(tx, key);
            if (result == null && highPriorityIndex != null) {
                result = highPriorityIndex.remove(tx, key);
                if (result == null && lowPriorityIndex != null) {
                    result = lowPriorityIndex.remove(tx, key);
                }
            }
            return result;
        }

        // Wires up the marshallers and loads all three indexes.
        void load(Transaction tx) throws IOException {
            defaultPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
            defaultPriorityIndex.setValueMarshaller(MessageKeysMarshaller.INSTANCE);
            defaultPriorityIndex.load(tx);
            lowPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
            lowPriorityIndex.setValueMarshaller(MessageKeysMarshaller.INSTANCE);
            lowPriorityIndex.load(tx);
            highPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
            highPriorityIndex.setValueMarshaller(MessageKeysMarshaller.INSTANCE);
            highPriorityIndex.load(tx);
        }

        void allocate(Transaction tx) throws IOException {
            defaultPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, tx.allocate());
            // low/high priority indexes were introduced with metadata version 2
            if (metadata.version >= 2) {
                lowPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, tx.allocate());
                highPriorityIndex = new BTreeIndex<Long, MessageKeys>(pageFile, tx.allocate());
            }
        }

        void configureLast(Transaction tx) throws IOException {
            // Figure out the next key using the last entry in the destination.
            TreeSet<Long> orderedSet = new TreeSet<Long>();

            addLast(orderedSet, highPriorityIndex, tx);
            addLast(orderedSet, defaultPriorityIndex, tx);
            addLast(orderedSet, lowPriorityIndex, tx);

            if (!orderedSet.isEmpty()) {
                nextMessageId = orderedSet.last() + 1;
            }
        }

        // Adds the last key of the index (if any) to the ordered set; tolerates
        // a null index (pre-version-2 stores).
        private void addLast(TreeSet<Long> orderedSet, BTreeIndex<Long, MessageKeys> index, Transaction tx) throws IOException {
            if (index != null) {
                Entry<Long, MessageKeys> lastEntry = index.getLast(tx);
                if (lastEntry != null) {
                    orderedSet.add(lastEntry.getKey());
                }
            }
        }

        // Drops and rebuilds all three indexes, preserving nextMessageId via
        // configureLast().
        void clear(Transaction tx) throws IOException {
            this.remove(tx);
            this.resetCursorPosition();
            this.allocate(tx);
            this.load(tx);
            this.configureLast(tx);
        }

        // Frees all pages used by the indexes.
        void remove(Transaction tx) throws IOException {
            defaultPriorityIndex.clear(tx);
            defaultPriorityIndex.unload(tx);
            tx.free(defaultPriorityIndex.getPageId());
            if (lowPriorityIndex != null) {
                lowPriorityIndex.clear(tx);
                lowPriorityIndex.unload(tx);

                tx.free(lowPriorityIndex.getPageId());
            }
            if (highPriorityIndex != null) {
                highPriorityIndex.clear(tx);
                highPriorityIndex.unload(tx);
                tx.free(highPriorityIndex.getPageId());
            }
        }

        void resetCursorPosition() {
            this.cursor.reset();
            lastDefaultKey = null;
            lastHighKey = null;
            lastLowKey = null;
        }

        // Positions every band just past the given sequence.
        // NOTE(review): new Long(...) is a deprecated boxing constructor;
        // Long.valueOf(...) would be preferred in a code change.
        void setBatch(Transaction tx, Long sequence) throws IOException {
            if (sequence != null) {
                Long nextPosition = new Long(sequence.longValue() + 1);
                lastDefaultKey = sequence;
                cursor.defaultCursorPosition = nextPosition.longValue();
                lastHighKey = sequence;
                cursor.highPriorityCursorPosition = nextPosition.longValue();
                lastLowKey = sequence;
                cursor.lowPriorityCursorPosition = nextPosition.longValue();
            }
        }

        // Positions the cursor from a recovered last-ack. When the plain
        // sequence positioning left everything at 0, fall back to seeding only
        // the bands at-or-above the acked priority.
        void setBatch(Transaction tx, LastAck last) throws IOException {
            setBatch(tx, last.lastAckedSequence);
            if (cursor.defaultCursorPosition == 0
                    && cursor.highPriorityCursorPosition == 0
                    && cursor.lowPriorityCursorPosition == 0) {
                long next = last.lastAckedSequence + 1;
                switch (last.priority) {
                    case DEF:
                        // default ack implies high band is already consumed too
                        cursor.defaultCursorPosition = next;
                        cursor.highPriorityCursorPosition = next;
                        break;
                    case HI:
                        cursor.highPriorityCursorPosition = next;
                        break;
                    case LO:
                        // low ack implies all bands consumed up to this point
                        cursor.lowPriorityCursorPosition = next;
                        cursor.defaultCursorPosition = next;
                        cursor.highPriorityCursorPosition = next;
                        break;
                }
            }
        }

        // Commits the per-band last keys recorded by the iterator into the
        // cursor, then clears them.
        void stoppedIterating() {
            if (lastDefaultKey != null) {
                cursor.defaultCursorPosition = lastDefaultKey.longValue() + 1;
            }
            if (lastHighKey != null) {
                cursor.highPriorityCursorPosition = lastHighKey.longValue() + 1;
            }
            if (lastLowKey != null) {
                cursor.lowPriorityCursorPosition = lastLowKey.longValue() + 1;
            }
            lastDefaultKey = null;
            lastHighKey = null;
            lastLowKey = null;
        }

        // Collects, from whichever band holds sequenceId, the entries to delete.
        void getDeleteList(Transaction tx, ArrayList<Entry<Long, MessageKeys>> deletes, Long sequenceId)
                throws IOException {
            if (defaultPriorityIndex.containsKey(tx, sequenceId)) {
                getDeleteList(tx, deletes, defaultPriorityIndex, sequenceId);
            } else if (highPriorityIndex != null && highPriorityIndex.containsKey(tx, sequenceId)) {
                getDeleteList(tx, deletes, highPriorityIndex, sequenceId);
            } else if (lowPriorityIndex != null && lowPriorityIndex.containsKey(tx, sequenceId)) {
                getDeleteList(tx, deletes, lowPriorityIndex, sequenceId);
            }
        }

        // Adds the entry at sequenceId; callers have already verified
        // containsKey, so next() is assumed to succeed.
        void getDeleteList(Transaction tx, ArrayList<Entry<Long, MessageKeys>> deletes,
                BTreeIndex<Long, MessageKeys> index, Long sequenceId) throws IOException {

            Iterator<Entry<Long, MessageKeys>> iterator = index.iterator(tx, sequenceId, null);
            deletes.add(iterator.next());
        }

        // Hands out the next store sequence; priority does not affect ordering
        // of the counter itself.
        long getNextMessageId(int priority) {
            return nextMessageId++;
        }

        // Looks the key up across the bands, remembering which band hit.
        // NOTE(review): unlike remove(), no null guards on the priority
        // indexes — appears to rely on callers only using this when they
        // exist (metadata.version >= 2); confirm for older stores.
        MessageKeys get(Transaction tx, Long key) throws IOException {
            MessageKeys result = defaultPriorityIndex.get(tx, key);
            if (result == null) {
                result = highPriorityIndex.get(tx, key);
                if (result == null) {
                    result = lowPriorityIndex.get(tx, key);
                    lastGetPriority = LO;
                } else {
                    lastGetPriority = HI;
                }
            } else {
                lastGetPriority = DEF;
            }
            return result;
        }

        // Routes the entry to the band matching its JMS priority.
        MessageKeys put(Transaction tx, int priority, Long key, MessageKeys value) throws IOException {
            if (priority == javax.jms.Message.DEFAULT_PRIORITY) {
                return defaultPriorityIndex.put(tx, key, value);
            } else if (priority > javax.jms.Message.DEFAULT_PRIORITY) {
                return highPriorityIndex.put(tx, key, value);
            } else {
                return lowPriorityIndex.put(tx, key, value);
            }
        }

        Iterator<Entry<Long, MessageKeys>> iterator(Transaction tx) throws IOException {
            return new MessageOrderIterator(tx, cursor, this);
        }

        Iterator<Entry<Long, MessageKeys>> iterator(Transaction tx, MessageOrderCursor m) throws IOException {
            return new MessageOrderIterator(tx, m, this);
        }

        public byte lastGetPriority() {
            return lastGetPriority;
        }

        // True when any initialised band has already advanced past sequence.
        public boolean alreadyDispatched(Long sequence) {
            return (cursor.highPriorityCursorPosition > 0 && cursor.highPriorityCursorPosition >= sequence) ||
                (cursor.defaultCursorPosition > 0 && cursor.defaultCursorPosition >= sequence) ||
                (cursor.lowPriorityCursorPosition > 0 && cursor.lowPriorityCursorPosition >= sequence);
        }

        public void trackPendingAdd(Long seq) {
            synchronized (pendingAdditions) {
                pendingAdditions.add(seq);
            }
        }

        public void trackPendingAddComplete(Long seq) {
            synchronized (pendingAdditions) {
                pendingAdditions.remove(seq);
            }
        }

        // Smallest in-flight add sequence, or null when none pending.
        public Long minPendingAdd() {
            synchronized (pendingAdditions) {
                if (!pendingAdditions.isEmpty()) {
                    return pendingAdditions.get(0);
                } else {
                    return null;
                }
            }
        }

        /**
         * Iterates the three bands in priority order: high first, then
         * default, then low. Records the last key served per band so
         * stoppedIterating() can advance the cursor.
         */
        class MessageOrderIterator implements Iterator<Entry<Long, MessageKeys>> {
            Iterator<Entry<Long, MessageKeys>> currentIterator;
            final Iterator<Entry<Long, MessageKeys>> highIterator;
            final Iterator<Entry<Long, MessageKeys>> defaultIterator;
            final Iterator<Entry<Long, MessageKeys>> lowIterator;

            MessageOrderIterator(Transaction tx, MessageOrderCursor m, MessageOrderIndex messageOrderIndex) throws IOException {
                // never iterate past the smallest not-yet-indexed add
                Long pendingAddLimiter = messageOrderIndex.minPendingAdd();
                this.defaultIterator = defaultPriorityIndex.iterator(tx, m.defaultCursorPosition, pendingAddLimiter);
                if (highPriorityIndex != null) {
                    this.highIterator = highPriorityIndex.iterator(tx, m.highPriorityCursorPosition, pendingAddLimiter);
                } else {
                    this.highIterator = null;
                }
                if (lowPriorityIndex != null) {
                    this.lowIterator = lowPriorityIndex.iterator(tx, m.lowPriorityCursorPosition, pendingAddLimiter);
                } else {
                    this.lowIterator = null;
                }
            }

            @Override
            public boolean hasNext() {
                // First call: pick the first non-empty band, high > default > low.
                if (currentIterator == null) {
                    if (highIterator != null) {
                        if (highIterator.hasNext()) {
                            currentIterator = highIterator;
                            return currentIterator.hasNext();
                        }
                        if (defaultIterator.hasNext()) {
                            currentIterator = defaultIterator;
                            return currentIterator.hasNext();
                        }
                        if (lowIterator.hasNext()) {
                            currentIterator = lowIterator;
                            return currentIterator.hasNext();
                        }
                        return false;
                    } else {
                        // no priority support: only the default band exists
                        currentIterator = defaultIterator;
                        return currentIterator.hasNext();
                    }
                }
                // Subsequent calls: fall through to the next lower band once the
                // current one is exhausted.
                if (highIterator != null) {
                    if (currentIterator.hasNext()) {
                        return true;
                    }
                    if (currentIterator == highIterator) {
                        if (defaultIterator.hasNext()) {
                            currentIterator = defaultIterator;
                            return currentIterator.hasNext();
                        }
                        if (lowIterator.hasNext()) {
                            currentIterator = lowIterator;
                            return currentIterator.hasNext();
                        }
                        return false;
                    }

                    if (currentIterator == defaultIterator) {
                        if (lowIterator.hasNext()) {
                            currentIterator = lowIterator;
                            return currentIterator.hasNext();
                        }
                        return false;
                    }
                }
                return currentIterator.hasNext();
            }

            @Override
            public Entry<Long, MessageKeys> next() {
                Entry<Long, MessageKeys> result = currentIterator.next();
                if (result != null) {
                    // remember the last key served in the active band for
                    // stoppedIterating()
                    Long key = result.getKey();
                    if (highIterator != null) {
                        if (currentIterator == defaultIterator) {
                            lastDefaultKey = key;
                        } else if (currentIterator == highIterator) {
                            lastHighKey = key;
                        } else {
                            lastLowKey = key;
                        }
                    } else {
                        lastDefaultKey = key;
                    }
                }
                return result;
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException();
            }
        }
    }

    /**
     * (De)serialises a HashSet of Strings as a length-prefixed Java-serialized
     * byte payload.
     */
    private static class HashSetStringMarshaller extends VariableMarshaller<HashSet<String>> {
        final static HashSetStringMarshaller INSTANCE = new HashSetStringMarshaller();

        @Override
        public void writePayload(HashSet<String> object, DataOutput dataOut) throws IOException {
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            ObjectOutputStream oout = new ObjectOutputStream(baos);
            oout.writeObject(object);
            oout.flush();
            oout.close();
            byte[] data = baos.toByteArray();
            dataOut.writeInt(data.length);
            dataOut.write(data);
        }

        @Override
        @SuppressWarnings("unchecked")
        public HashSet<String> readPayload(DataInput dataIn) throws IOException {
            int dataLen = dataIn.readInt();
            byte[] data = new byte[dataLen];
            dataIn.readFully(data);
            ByteArrayInputStream bais = new ByteArrayInputStream(data);
            ObjectInputStream oin = new ObjectInputStream(bais);
            try {
                return (HashSet<String>) oin.readObject();
            } catch (ClassNotFoundException cfe) {
                // surface as IOException but keep the original cause
                IOException ioe = new IOException("Failed to read HashSet<String>: " + cfe);
                ioe.initCause(cfe);
                throw ioe;
            }
        }
    }

    public File getIndexDirectory() {
        return indexDirectory;
    }

    public void setIndexDirectory(File indexDirectory) {
        this.indexDirectory = indexDirectory;
    }

    // Callback invoked (with the index lock held) once a store sequence has
    // been assigned to an added message (see AddOperation).
    interface IndexAware {
        public void sequenceAssignedWithIndexLocked(long index);
    }

    public String getPreallocationScope() {
        return preallocationScope;
    }

    public void setPreallocationScope(String preallocationScope) {
        this.preallocationScope = preallocationScope;
    }

    public String getPreallocationStrategy() {
        return preallocationStrategy;
    }

    public void setPreallocationStrategy(String preallocationStrategy) {
        this.preallocationStrategy = preallocationStrategy;
    }
}