001/** 002 * Licensed to the Apache Software Foundation (ASF) under one or more 003 * contributor license agreements. See the NOTICE file distributed with 004 * this work for additional information regarding copyright ownership. 005 * The ASF licenses this file to You under the Apache License, Version 2.0 006 * (the "License"); you may not use this file except in compliance with 007 * the License. You may obtain a copy of the License at 008 * 009 * http://www.apache.org/licenses/LICENSE-2.0 010 * 011 * Unless required by applicable law or agreed to in writing, software 012 * distributed under the License is distributed on an "AS IS" BASIS, 013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 014 * See the License for the specific language governing permissions and 015 * limitations under the License. 016 */ 017package org.apache.activemq.store.kahadb.disk.page; 018 019import org.apache.activemq.store.kahadb.disk.page.PageFile.PageWrite; 020import org.apache.activemq.store.kahadb.disk.util.*; 021import org.apache.activemq.util.ByteSequence; 022import org.apache.activemq.store.kahadb.disk.util.DataByteArrayInputStream; 023import org.apache.activemq.store.kahadb.disk.util.DataByteArrayOutputStream; 024import org.apache.activemq.util.IOHelper; 025import org.slf4j.Logger; 026import org.slf4j.LoggerFactory; 027 028import java.io.*; 029import java.util.Iterator; 030import java.util.NoSuchElementException; 031import java.util.TreeMap; 032 033/** 034 * The class used to read/update a PageFile object. Using a transaction allows you to 035 * do multiple update operations in a single unit of work. 
 */
public class Transaction implements Iterable<Page> {

    private static final Logger LOG = LoggerFactory.getLogger(Transaction.class);

    // Spill-over storage: once the in-memory page writes exceed maxTransactionSize,
    // further page data is written to this temp file instead of being held on the heap.
    private RandomAccessFile tmpFile;
    private File txFile;
    // Next free offset in tmpFile for spilled page data.
    private long nextLocation = 0;

    /**
     * The PageOverflowIOException occurs when a page write is requested
     * and its data is larger than what would fit into a single page.
     */
    public class PageOverflowIOException extends IOException {
        private static final long serialVersionUID = 1L;

        public PageOverflowIOException(String message) {
            super(message);
        }
    }

    /**
     * The InvalidPageIOException is thrown on an attempt to load/store a page
     * with an invalid page id.
     */
    public class InvalidPageIOException extends IOException {
        private static final long serialVersionUID = 1L;

        private final long page;

        public InvalidPageIOException(String message, long page) {
            super(message);
            this.page = page;
        }

        /**
         * @return the offending page id
         */
        public long getPage() {
            return page;
        }
    }

    /**
     * This closure interface is intended for the end user to implement callbacks
     * for the Transaction.execute() method.
     *
     * @param <T> The type of exceptions that operation will throw.
     */
    public interface Closure <T extends Throwable> {
        public void execute(Transaction tx) throws T;
    }

    /**
     * This closure interface is intended for the end user to implement callbacks
     * for the Transaction.execute() method, producing a result.
     *
     * @param <R> The type of result that the closure produces.
     * @param <T> The type of exceptions that operation will throw.
     */
    public interface CallableClosure<R, T extends Throwable> {
        public R execute(Transaction tx) throws T;
    }


    // The page file that this Transaction operates against.
    private final PageFile pageFile;
    // If this transaction is updating stuff.. this is the tx id; -1 means no write has occurred yet.
    private long writeTransactionId=-1;
    // List of pages that this transaction has modified, keyed by page id.
    private TreeMap<Long, PageWrite> writes=new TreeMap<Long, PageWrite>();
    // List of pages allocated in this transaction
    private final SequenceSet allocateList = new SequenceSet();
    // List of pages freed in this transaction
    private final SequenceSet freeList = new SequenceSet();

    // Byte threshold (default 10 MiB, overridable via the maxKahaDBTxSize system
    // property) above which page writes spill to a temp file.
    private long maxTransactionSize = Long.getLong("maxKahaDBTxSize", 10485760L);

    // Approximate in-memory size of this transaction's pending writes, in bytes.
    private long size = 0;

    Transaction(PageFile pageFile) {
        this.pageFile = pageFile;
    }

    /**
     * @return the page file that created this Transaction
     */
    public PageFile getPageFile() {
        return this.pageFile;
    }

    /**
     * Allocates a free page that you can write data to.
     *
     * @return a newly allocated page.
     * @throws IOException
     *         If a disk error occurred.
     * @throws IllegalStateException
     *         if the PageFile is not loaded
     */
    public <T> Page<T> allocate() throws IOException {
        return allocate(1);
    }

    /**
     * Allocates a block of free pages that you can write data to.
     *
     * @param count the number of sequential pages to allocate
     * @return the first page of the sequential set.
     * @throws IOException
     *         If a disk error occurred.
     * @throws IllegalStateException
     *         if the PageFile is not loaded
     */
    public <T> Page<T> allocate(int count) throws IOException {
        Page<T> rc = pageFile.allocate(count);
        // Remember the allocation so rollback() can release the whole run.
        allocateList.add(new Sequence(rc.getPageId(), rc.getPageId()+count-1));
        return rc;
    }
    /**
     * Frees up a previously allocated page so that it can be re-allocated again.
     *
     * @param pageId the page to free up
     * @throws IOException
     *         If a disk error occurred.
     * @throws IllegalStateException
     *         if the PageFile is not loaded
     */
    public void free(long pageId) throws IOException {
        free(load(pageId, null));
    }

    /**
     * Frees up a previously allocated sequence of pages so that it can be re-allocated again.
     *
     * @param pageId the initial page of the sequence that will be getting freed
     * @param count the number of pages in the sequence
     *
     * @throws IOException
     *         If a disk error occurred.
     * @throws IllegalStateException
     *         if the PageFile is not loaded
     */
    public void free(long pageId, int count) throws IOException {
        free(load(pageId, null), count);
    }

    /**
     * Frees up a previously allocated sequence of pages so that it can be re-allocated again.
     *
     * @param page the initial page of the sequence that will be getting freed
     * @param count the number of pages in the sequence
     *
     * @throws IOException
     *         If a disk error occurred.
     * @throws IllegalStateException
     *         if the PageFile is not loaded
     */
    public <T> void free(Page<T> page, int count) throws IOException {
        pageFile.assertLoaded();
        long offsetPage = page.getPageId();
        while (count-- > 0) {
            // The first iteration reuses the caller-supplied page; subsequent
            // iterations must load each following page header themselves.
            if (page == null) {
                page = load(offsetPage, null);
            }
            free(page);
            page = null;
            // Increment the offsetPage value since using it depends on the current count.
            offsetPage++;
        }
    }

    /**
     * Frees up a previously allocated page so that it can be re-allocated again.
     * If the page heads an overflow chain (PAGE_PART_TYPE), every page in the
     * chain is freed.
     *
     * @param page the page to free up
     * @throws IOException
     *         If a disk error occurred.
     * @throws IllegalStateException
     *         if the PageFile is not loaded
     */
    public <T> void free(Page<T> page) throws IOException {
        pageFile.assertLoaded();

        // We may need loop to free up a page chain.
        while (page != null) {

            // Is it already free??
            if (page.getType() == Page.PAGE_FREE_TYPE) {
                return;
            }

            // For chained pages, remember the next link before overwriting this header.
            Page<T> next = null;
            if (page.getType() == Page.PAGE_PART_TYPE) {
                next = load(page.getNext(), null);
            }

            page.makeFree(getWriteTransactionId());
            // ensure free page is visible while write is pending
            pageFile.addToCache(page.copy());

            // Serialize the now-free page header and queue it as a pending write.
            DataByteArrayOutputStream out = new DataByteArrayOutputStream(pageFile.getPageSize());
            page.write(out);
            write(page, out.getData());

            // The page is returned to the page file's free list at commit time.
            freeList.add(page.getPageId());
            page = next;
        }
    }
    /**
     * Stores a page and its marshalled payload as part of this transaction.
     *
     * @param page
     *        the page to write. The Page object must be fully populated with a valid pageId, type, and data.
     * @param marshaller
     *        the marshaller to use to store the data portion of the Page, may be null if you do not wish to write the data.
     * @param overflow
     *        If true, then if the page data marshalls to a bigger size than can fit in one page, then additional
     *        overflow pages are automatically allocated and chained to this page to store all the data. If false,
     *        and the overflow condition would occur, then the PageOverflowIOException is thrown.
     * @throws IOException
     *         If a disk error occurred.
     * @throws PageOverflowIOException
     *         If the page data marshalls to size larger than maximum page size and overflow was false.
     * @throws IllegalStateException
     *         if the PageFile is not loaded
     */
    public <T> void store(Page<T> page, Marshaller<T> marshaller, final boolean overflow) throws IOException {
        DataByteArrayOutputStream out = (DataByteArrayOutputStream)openOutputStream(page, overflow);
        if (marshaller != null) {
            marshaller.writePayload(page.get(), out);
        }
        // close() is what actually queues the page write(s) into the transaction.
        out.close();
    }

    /**
     * Opens an output stream whose contents become the given page's data when the
     * stream is closed. Page writes happen incrementally as data is marshalled, so
     * payloads much larger than one page can be written without buffering them whole.
     *
     * @param page the page to write; a defensive copy is taken immediately
     * @param overflow whether overflow pages may be chained when the data exceeds one page
     * @throws IOException If a disk error occurred.
     */
    public OutputStream openOutputStream(Page page, final boolean overflow) throws IOException {
        pageFile.assertLoaded();

        // Copy to protect against the end user changing
        // the page instance while we are doing a write.
        final Page copy = page.copy();
        pageFile.addToCache(copy);

        // To support writing VERY large data, we override the output stream so
        // that we do the page writes incrementally while the data is being
        // marshalled.
        DataByteArrayOutputStream out = new DataByteArrayOutputStream(pageFile.getPageSize() * 2) {
            // The page currently being filled; advances along the overflow chain.
            Page current = copy;

            @SuppressWarnings("unchecked")
            @Override
            protected void onWrite() throws IOException {

                // Are we at an overflow condition?
                final int pageSize = pageFile.getPageSize();
                if (pos >= pageSize) {
                    // If overflow is allowed
                    if (overflow) {

                        do {
                            // Reuse an existing chained page when rewriting, otherwise
                            // grow the chain with a fresh allocation.
                            Page next;
                            if (current.getType() == Page.PAGE_PART_TYPE) {
                                next = load(current.getNext(), null);
                            } else {
                                next = allocate();
                            }

                            next.txId = current.txId;

                            // Write the page header
                            int oldPos = pos;
                            pos = 0;

                            current.makePagePart(next.getPageId(), getWriteTransactionId());
                            current.write(this);

                            // Do the page write..
                            byte[] data = new byte[pageSize];
                            System.arraycopy(buf, 0, data, 0, pageSize);
                            Transaction.this.write(current, data);

                            // make the new link visible
                            pageFile.addToCache(current);

                            // Reset for the next page chunk
                            pos = 0;
                            // The page header marshalled after the data is written.
                            skip(Page.PAGE_HEADER_SIZE);
                            // Move the overflow data after the header.
                            System.arraycopy(buf, pageSize, buf, pos, oldPos - pageSize);
                            pos += oldPos - pageSize;
                            current = next;

                        } while (pos > pageSize);
                    } else {
                        throw new PageOverflowIOException("Page overflow.");
                    }
                }

            }

            @Override
            public void close() throws IOException {
                super.close();

                // We need to free up the rest of the page chain..
                if (current.getType() == Page.PAGE_PART_TYPE) {
                    free(current.getNext());
                }

                // Mark the final page with the payload length so readers know where it ends.
                current.makePageEnd(pos, getWriteTransactionId());

                // make visible as end page
                pageFile.addToCache(current);

                // Write the header..
                pos = 0;
                current.write(this);

                Transaction.this.write(current, buf);
            }
        };

        // The page header marshaled after the data is written.
        out.skip(Page.PAGE_HEADER_SIZE);
        return out;
    }

    /**
     * Loads a page from disk.
     *
     * @param pageId
     *        the id of the page to load
     * @param marshaller
     *        the marshaller to use to load the data portion of the Page, may be null if you do not wish to load the data.
     * @return The page with the given id
     * @throws IOException
     *         If a disk error occurred.
     * @throws IllegalStateException
     *         if the PageFile is not loaded
     */
    public <T> Page<T> load(long pageId, Marshaller<T> marshaller) throws IOException {
        pageFile.assertLoaded();
        Page<T> page = new Page<T>(pageId);
        load(page, marshaller);
        return page;
    }
387 * 388 * @param page - The pageId field must be properly set 389 * @param marshaller 390 * the marshaler to use to load the data portion of the Page, may be null if you do not wish to load the data. 391 * @throws IOException 392 * If an disk error occurred. 393 * @throws InvalidPageIOException 394 * If the page is is not valid. 395 * @throws IllegalStateException 396 * if the PageFile is not loaded 397 */ 398 @SuppressWarnings("unchecked") 399 public <T> void load(Page<T> page, Marshaller<T> marshaller) throws IOException { 400 pageFile.assertLoaded(); 401 402 // Can't load invalid offsets... 403 long pageId = page.getPageId(); 404 if (pageId < 0) { 405 throw new InvalidPageIOException("Page id is not valid", pageId); 406 } 407 408 // It might be a page this transaction has modified... 409 PageWrite update = writes.get(pageId); 410 if (update != null) { 411 page.copy(update.getPage()); 412 return; 413 } 414 415 // We may be able to get it from the cache... 416 Page<T> t = pageFile.getFromCache(pageId); 417 if (t != null) { 418 page.copy(t); 419 return; 420 } 421 422 if (marshaller != null) { 423 // Full page read.. 424 InputStream is = openInputStream(page); 425 DataInputStream dataIn = new DataInputStream(is); 426 page.set(marshaller.readPayload(dataIn)); 427 is.close(); 428 } else { 429 // Page header read. 430 DataByteArrayInputStream in = new DataByteArrayInputStream(new byte[Page.PAGE_HEADER_SIZE]); 431 pageFile.readPage(pageId, in.getRawData()); 432 page.read(in); 433 page.set(null); 434 } 435 436 // Cache it. 
437 if (marshaller != null) { 438 pageFile.addToCache(page); 439 } 440 } 441 442 /** 443 * @see org.apache.activemq.store.kahadb.disk.page.Transaction#load(org.apache.activemq.store.kahadb.disk.page.Page, 444 * org.apache.activemq.store.kahadb.disk.util.Marshaller) 445 */ 446 public InputStream openInputStream(final Page p) throws IOException { 447 448 return new InputStream() { 449 450 private ByteSequence chunk = new ByteSequence(new byte[pageFile.getPageSize()]); 451 private Page page = readPage(p); 452 private int pageCount = 1; 453 454 private Page markPage; 455 private ByteSequence markChunk; 456 457 private Page readPage(Page page) throws IOException { 458 // Read the page data 459 460 pageFile.readPage(page.getPageId(), chunk.getData()); 461 462 chunk.setOffset(0); 463 chunk.setLength(pageFile.getPageSize()); 464 465 DataByteArrayInputStream in = new DataByteArrayInputStream(chunk); 466 page.read(in); 467 468 chunk.setOffset(Page.PAGE_HEADER_SIZE); 469 if (page.getType() == Page.PAGE_END_TYPE) { 470 chunk.setLength((int)(page.getNext())); 471 } 472 473 if (page.getType() == Page.PAGE_FREE_TYPE) { 474 throw new EOFException("Chunk stream does not exist, page: " + page.getPageId() + " is marked free"); 475 } 476 477 return page; 478 } 479 480 public int read() throws IOException { 481 if (!atEOF()) { 482 return chunk.data[chunk.offset++] & 0xff; 483 } else { 484 return -1; 485 } 486 } 487 488 private boolean atEOF() throws IOException { 489 if (chunk.offset < chunk.length) { 490 return false; 491 } 492 if (page.getType() == Page.PAGE_END_TYPE) { 493 return true; 494 } 495 fill(); 496 return chunk.offset >= chunk.length; 497 } 498 499 private void fill() throws IOException { 500 page = readPage(new Page(page.getNext())); 501 pageCount++; 502 } 503 504 public int read(byte[] b) throws IOException { 505 return read(b, 0, b.length); 506 } 507 508 public int read(byte b[], int off, int len) throws IOException { 509 if (!atEOF()) { 510 int rc = 0; 511 while 
(!atEOF() && rc < len) { 512 len = Math.min(len, chunk.length - chunk.offset); 513 if (len > 0) { 514 System.arraycopy(chunk.data, chunk.offset, b, off, len); 515 chunk.offset += len; 516 } 517 rc += len; 518 } 519 return rc; 520 } else { 521 return -1; 522 } 523 } 524 525 public long skip(long len) throws IOException { 526 if (atEOF()) { 527 int rc = 0; 528 while (!atEOF() && rc < len) { 529 len = Math.min(len, chunk.length - chunk.offset); 530 if (len > 0) { 531 chunk.offset += len; 532 } 533 rc += len; 534 } 535 return rc; 536 } else { 537 return -1; 538 } 539 } 540 541 public int available() { 542 return chunk.length - chunk.offset; 543 } 544 545 public boolean markSupported() { 546 return true; 547 } 548 549 public void mark(int markpos) { 550 markPage = page; 551 byte data[] = new byte[pageFile.getPageSize()]; 552 System.arraycopy(chunk.getData(), 0, data, 0, pageFile.getPageSize()); 553 markChunk = new ByteSequence(data, chunk.getOffset(), chunk.getLength()); 554 } 555 556 public void reset() { 557 page = markPage; 558 chunk = markChunk; 559 } 560 561 }; 562 } 563 564 /** 565 * Allows you to iterate through all active Pages in this object. Pages with type Page.FREE_TYPE are 566 * not included in this iteration. 567 * 568 * Pages removed with Iterator.remove() will not actually get removed until the transaction commits. 569 * 570 * @throws IllegalStateException 571 * if the PageFile is not loaded 572 */ 573 public Iterator<Page> iterator() { 574 return (Iterator<Page>)iterator(false); 575 } 576 577 /** 578 * Allows you to iterate through all active Pages in this object. You can optionally include free pages in the pages 579 * iterated. 
580 * 581 * @param includeFreePages - if true, free pages are included in the iteration 582 * @throws IllegalStateException 583 * if the PageFile is not loaded 584 */ 585 public Iterator<Page> iterator(final boolean includeFreePages) { 586 587 pageFile.assertLoaded(); 588 589 return new Iterator<Page>() { 590 591 long nextId; 592 Page nextPage; 593 Page lastPage; 594 595 private void findNextPage() { 596 if (!pageFile.isLoaded()) { 597 throw new IllegalStateException("Cannot iterate the pages when the page file is not loaded"); 598 } 599 600 if (nextPage != null) { 601 return; 602 } 603 604 try { 605 while (nextId < pageFile.getPageCount()) { 606 607 Page page = load(nextId, null); 608 609 if (includeFreePages || page.getType() != Page.PAGE_FREE_TYPE) { 610 nextPage = page; 611 return; 612 } else { 613 nextId++; 614 } 615 } 616 } catch (IOException e) { 617 } 618 } 619 620 public boolean hasNext() { 621 findNextPage(); 622 return nextPage != null; 623 } 624 625 public Page next() { 626 findNextPage(); 627 if (nextPage != null) { 628 lastPage = nextPage; 629 nextPage = null; 630 nextId++; 631 return lastPage; 632 } else { 633 throw new NoSuchElementException(); 634 } 635 } 636 637 @SuppressWarnings("unchecked") 638 public void remove() { 639 if (lastPage == null) { 640 throw new IllegalStateException(); 641 } 642 try { 643 free(lastPage); 644 lastPage = null; 645 } catch (IOException e) { 646 throw new RuntimeException(e); 647 } 648 } 649 }; 650 } 651 652 /////////////////////////////////////////////////////////////////// 653 // Commit / Rollback related methods.. 654 /////////////////////////////////////////////////////////////////// 655 656 /** 657 * Commits the transaction to the PageFile as a single 'Unit of Work'. Either all page updates associated 658 * with the transaction are written to disk or none will. 
    /**
     * Commits the transaction to the PageFile as a single 'Unit of Work'. Either all page updates associated
     * with the transaction are written to disk or none will.
     */
    public void commit() throws IOException {
        if( writeTransactionId!=-1 ) {
            if (tmpFile != null) {
                LOG.debug("Committing transaction {}: Size {} kb", writeTransactionId, tmpFile.length() / (1024));
                // Hand the spill file over to the page file for cleanup after the writes land.
                pageFile.removeTmpFile(getTempFile(), tmpFile);
                tmpFile = null;
                txFile = null;
            }
            // Actually do the page writes...
            pageFile.write(writes.entrySet());
            // Release the pages that were freed up in the transaction..
            freePages(freeList);

            freeList.clear();
            allocateList.clear();
            writes.clear();
            writeTransactionId = -1;
        } else {
            // NOTE(review): a transaction that only allocated pages (no writes) releases
            // those allocations on commit, and allocateList is not cleared here —
            // presumably intentional reclamation of unused pages; confirm.
            freePages(allocateList);
        }
        size = 0;
    }

    /**
     * Rolls back the transaction: pending writes are discarded and pages allocated
     * during the transaction are returned to the free list.
     */
    public void rollback() throws IOException {
        if( writeTransactionId!=-1 ) {
            if (tmpFile != null) {
                // Discard the spill file; its contents were never applied.
                tmpFile.close();
                getTempFile().delete();
                tmpFile = null;
                txFile = null;
            }
            // Release the pages that were allocated in the transaction...
            freePages(allocateList);

            freeList.clear();
            allocateList.clear();
            writes.clear();
            writeTransactionId = -1;
        } else {
            // NOTE(review): allocateList is not cleared in this branch either — verify.
            freePages(allocateList);
        }
        size = 0;
    }

    /**
     * Lazily assigns this transaction a write transaction id the first time a
     * mutating operation needs one; -1 means the transaction is still read-only.
     */
    private long getWriteTransactionId() {
        if( writeTransactionId==-1 ) {
            writeTransactionId = pageFile.getNextWriteTransactionId();
        }
        return writeTransactionId;
    }


    /**
     * Lazily creates the spill file's File handle, named after the write
     * transaction id and current time to avoid collisions.
     */
    protected File getTempFile() {
        if (txFile == null) {
            txFile = new File(getPageFile().getDirectory(), IOHelper.toFileSystemSafeName("tx-" + Long.toString(getWriteTransactionId()) + "-" + Long.toString(System.currentTimeMillis()) + ".tmp"));
        }
        return txFile;
    }
    /**
     * Queues up a page write that should get done when commit() gets called.
     * Once the accumulated size of pending writes exceeds maxTransactionSize,
     * subsequent page data is spilled to a temp file instead of the heap.
     */
    private void write(final Page page, byte[] data) throws IOException {
        Long key = page.getPageId();

        // how much pages we have for this transaction
        // NOTE(review): computed from writes.size() BEFORE this write is added, and
        // re-writing an already-spilled page leaves its old temp-file span unused —
        // both appear to be accepted approximations.
        size = writes.size() * (long) pageFile.getPageSize();

        PageWrite write;

        if (size > maxTransactionSize) {
            if (tmpFile == null) {
                tmpFile = new RandomAccessFile(getTempFile(), "rw");
            }
            // Append the page data at the next free offset in the spill file and
            // record only its location/length in memory.
            long location = nextLocation;
            tmpFile.seek(nextLocation);
            tmpFile.write(data);
            nextLocation = location + data.length;
            write = new PageWrite(page, location, data.length, getTempFile());
        } else {
            write = new PageWrite(page, data);
        }
        // Later writes to the same page replace earlier ones.
        writes.put(key, write);
    }

    /**
     * Returns every page id in the given set to the page file's free list.
     *
     * @param list the set of page id sequences to release
     * @throws RuntimeException if the underlying free operation fails
     */
    private void freePages(SequenceSet list) throws RuntimeException {
        Sequence seq = list.getHead();
        while( seq!=null ) {
            seq.each(new Sequence.Closure<RuntimeException>(){
                public void execute(long value) {
                    pageFile.freePage(value);
                }
            });
            seq = seq.getNext();
        }
    }

    /**
     * @return true if there are no uncommitted page file updates associated with this transaction.
     */
    public boolean isReadOnly() {
        return writeTransactionId==-1;
    }

    ///////////////////////////////////////////////////////////////////
    // Transaction closure helpers...
    ///////////////////////////////////////////////////////////////////
783 */ 784 public <T extends Throwable> void execute(Closure<T> closure) throws T, IOException { 785 boolean success = false; 786 try { 787 closure.execute(this); 788 success = true; 789 } finally { 790 if (success) { 791 commit(); 792 } else { 793 rollback(); 794 } 795 } 796 } 797 798 /** 799 * Executes a closure and if it does not throw any exceptions, then it commits the transaction. 800 * If the closure throws an Exception, then the transaction is rolled back. 801 * 802 * @param <T> 803 * @param closure - the work to get exectued. 804 * @throws T if the closure throws it 805 * @throws IOException If the commit fails. 806 */ 807 public <R, T extends Throwable> R execute(CallableClosure<R, T> closure) throws T, IOException { 808 boolean success = false; 809 try { 810 R rc = closure.execute(this); 811 success = true; 812 return rc; 813 } finally { 814 if (success) { 815 commit(); 816 } else { 817 rollback(); 818 } 819 } 820 } 821}