package org.cache2k.storage;

/*
 * #%L
 * cache2k core package
 * %%
 * Copyright (C) 2000 - 2015 headissue GmbH, Munich
 * %%
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program. If not, see
 * <http://www.gnu.org/licenses/gpl-3.0.html>.
 * #L%
 */

import org.cache2k.StorageConfiguration;
import org.cache2k.impl.ExceptionWrapper;

import java.io.DataOutput;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.RandomAccessFile;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.nio.ReadOnlyBufferException;
import java.nio.channels.FileChannel;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;

import org.cache2k.storage.FreeSpaceMap.Slot;
import org.cache2k.impl.util.TunableConstants;
import org.cache2k.impl.util.TunableFactory;


/**
 * Implements a robust storage on a file or a byte buffer.
 *
 * <p>
 * The storage tries to be robust and does not require a clean shutdown, so it
 * will survive machine crashes. In this case it uses the latest data possible that
 * is known to be intact. The amount of data loss can be controlled by specifying
 * a commit interval.
 * </p>
 *
 * <p>Possible optimizations: More specialized data structures, compaction.</p>
 *
 * @author Jens Wilke; created: 2014-03-27
 */
public class ImageFileStorage
  implements CacheStorage, FlushableStorage, EntryExpiryUpdateableStorage {

  /** Number of bytes we used for on disk disk checksum of our descriptor */
  final static int CHECKSUM_BYTES = 16;

  // two descriptor files are written alternately, so one intact copy survives a crash
  final static int DESCRIPTOR_COUNT = 2;

  // magic prefix written at the start of each descriptor file for format identification
  final static String DESCRIPTOR_MAGIC = "CACHE2K STORAGE 00";

  final static Marshaller DESCRIPTOR_MARSHALLER = new StandardMarshaller();

  final static Marshaller DEFAULT_MARSHALLER = DESCRIPTOR_MARSHALLER;

  // set when an unclean shutdown was detected on reopen, see isDataLost()
  boolean dataLost = false;
  Tunable tunable = TunableFactory.get(Tunable.class);
  // marshallers may be replaced from descriptor parameters in initializeFromDisk()
  Marshaller keyMarshaller = DEFAULT_MARSHALLER;
  Marshaller valueMarshaller = DEFAULT_MARSHALLER;
  Marshaller universalMarshaller = DEFAULT_MARSHALLER;
  Marshaller exceptionMarshaller = DEFAULT_MARSHALLER;
  // backing image file; null means the storage is closed (see isClosed())
  RandomAccessFile file;
  // memory mapping of the image file, refreshed whenever the file is resized
  ByteBuffer buffer;

  public final FreeSpaceMap freeMap = new FreeSpaceMap();

  // key -> on-heap entry metadata (position/size in the image file); guarded by valuesLock
  Map<Object, HeapEntry> values;

  final Object valuesLock = new Object();
  final Object commitLock = new Object();

  /** buffer entries added since last commit */
  HashMap<Object, HeapEntry> newBufferEntries;

  /** entries deleted since last commit */
  HashMap<Object, HeapEntry> deletedBufferEntries;

  /**
   * Entries still needed originating from the earliest index file. Every commit
   * entries will be removed that are newly written. Each commit also
   * partially rewrites some of the entries here to make the file redundant.
   */
  HashMap<Object, HeapEntry> entriesInEarlierIndex;

  /**
   * All entries committed to the current index file. Updated within commit phase.
   * This is used to fill up {@link #entriesInEarlierIndex} when starting a
   * new index file. committedEntries is subset of values.
   */
  HashMap<Object, HeapEntry> committedEntries;

  /**
   * Protected by valuesLock
   */
  SlotBucket justUnusedSlots = new SlotBucket();

  /**
   * Protected by commitLock
   */
  Queue<SlotBucket> slotsToFreeQueue = new ArrayDeque<SlotBucket>();

  // persisted storage metadata, written alternately to the .dsc files
  BufferDescriptor descriptor;
  // base path/name for the .img, .dsc and .idx files
  String fileName;

  boolean readOnly;

  /**
   * capacity is by default unlimited.
   */
  int entryCapacity = Integer.MAX_VALUE;
  long missCount = 0;
  long hitCount = 0;
  long putCount = 0;
  long evictCount = 0;
  long removeCount = 0;
  // bytes released to the free map during the last commit, see CommitWorker.freeSpace()
  long freedLastCommit = 0;
  CacheStorageContext context;

  /**
   * Constructor for tests: use the given tunable instead of the factory default.
   * NOTE(review): the declared exceptions are never thrown here — confirm before removing.
   */
  public ImageFileStorage(Tunable t) throws IOException, ClassNotFoundException {
    tunable = t;
  }

  public ImageFileStorage() {
  }

  /**
   * Open the storage: derive the file name from configuration or the
   * manager/cache name, then read the existing on-disk state via {@link #reopen()}.
   */
  public void open(CacheStorageContext ctx, StorageConfiguration cfg) throws IOException {
    context = ctx;
    if (ctx.getProperties() != null) {
      tunable = TunableFactory.get(ctx.getProperties(), Tunable.class);
    }
    if (cfg.getStorageName() != null) {
      fileName = cfg.getStorageName();
    }
    if (fileName == null) {
      fileName =
        "cache2k-storage:" + ctx.getManagerName() + ":" + ctx.getCacheName();
    }
    if (cfg.getLocation() != null && cfg.getLocation().length() > 0) {
      File f = new File(cfg.getLocation());
      if (!f.isDirectory()) {
        throw new IllegalArgumentException("location is not directory");
      }
      fileName = f.getPath() + File.separator + fileName;
    }
    entryCapacity = cfg.getEntryCapacity();
    readOnly = cfg.isReadOnly();
    reopen();
  }

  /**
   * (Re)open the image file, rebuild in-memory state and load the latest intact
   * descriptor plus index. Falls back to a fresh, empty storage when no intact
   * descriptor is found; {@link #dataLost} is set if data existed but could not be read.
   */
  private void reopen() throws IOException {
    try {
      if (readOnly) {
        file = null;
        try {
          file = new RandomAccessFile(fileName + ".img", "r");
        } catch (FileNotFoundException ignore) {
          // read-only mode tolerates a missing image file; buffer stays empty
        }
      } else {
        file = new RandomAccessFile(fileName + ".img", "rw");
      }
      resetBufferFromFile();
      synchronized (freeMap) {
        freeMap.init();
        freeMap.freeSpace(0, (int) getFileLength());
      }
      if (entryCapacity == Integer.MAX_VALUE) {
        values = new HashMap<Object, HeapEntry>();
      } else {
        // access-ordered LRU map; evicts via removeEldestEntry when over capacity
        values = new LinkedHashMap<Object, HeapEntry>(100, .75F, true) {

          @Override
          protected boolean removeEldestEntry(Map.Entry<Object, HeapEntry> _eldest) {
            if (getEntryCount() > entryCapacity) {
              evict(_eldest.getValue());
              return true;
            }
            return false;
          }

        };
      }
      newBufferEntries = new HashMap<Object, HeapEntry>();
      deletedBufferEntries = new HashMap<Object, HeapEntry>();
      justUnusedSlots = new SlotBucket();
      slotsToFreeQueue = new ArrayDeque<SlotBucket>();
      entriesInEarlierIndex = createEarlierIndexEntryHash();
      committedEntries = new HashMap<Object, HeapEntry>();
      BufferDescriptor d = readLatestIntactBufferDescriptor();
      if (d != null) {
        try {
          descriptor = d;
          initializeFromDisk();
        } catch (IOException ex) {
          // corrupt index: fall through to a fresh storage below
          System.err.println(fileName + " got IOException: " + ex);
          descriptor = d = null;
        }
      }
      if (d == null) {
        if (buffer.capacity() > 0) {
          // image data exists but no intact descriptor -> unclean shutdown
          dataLost = true;
        }
        initializeNewStorage();
      }
    } catch (ClassNotFoundException e) {
      throw new IOException(e);
    }
  }

  /** Length of the image file, 0 when no file is open. */
  private long getFileLength() throws IOException {
    if (file == null) {
      return 0;
    }
    return file.length();
  }

  /** Recreate the marshallers from the persisted descriptor and read the key index. */
  private void initializeFromDisk() throws IOException, ClassNotFoundException {
    MarshallerFactory _factory = context.getMarshallerFactory();
    keyMarshaller = _factory.createMarshaller(descriptor.keyMarshallerParameters);
    valueMarshaller = _factory.createMarshaller(descriptor.valueMarshallerParameters);
    exceptionMarshaller = _factory.createMarshaller(descriptor.exceptionMarshallerParameters);
    readIndex();
  }

  /** Start with an empty descriptor and marshallers derived from the cache types. */
  private void initializeNewStorage() throws IOException {
    descriptor = new BufferDescriptor();
    descriptor.storageCreated = System.currentTimeMillis();
    CacheStorageContext ctx = context;
    keyMarshaller = ctx.getMarshallerFactory().createMarshaller(ctx.getKeyType());
    valueMarshaller = ctx.getMarshallerFactory().createMarshaller(ctx.getValueType());
    exceptionMarshaller = ctx.getMarshallerFactory().createMarshaller(Throwable.class);
  }

  /**
   * Close the storage. When it contains no entries the files are removed as well.
   * No flush/commit is done here, see {@link #fastClose()}.
   */
  public void close() throws Exception {
    synchronized (commitLock) {
      if (isClosed()) {
        return;
      }
      synchronized (valuesLock) {
        boolean _empty = values.size() == 0;
        fastClose();
        if (_empty) {
          removeFiles();
        }
      }
    }
  }

  /**
   * Remove the files. We do no synchronize here, since cache guarantees we are alone.
   */
  public void clear() throws IOException {
    // snapshot of the operation counters to detect concurrent access during clear
    long _counters = putCount + missCount + hitCount + removeCount + evictCount;
    if (file != null) {
      fastClose();
    }
    removeFiles();
    reopen();
    try {
      Thread.sleep(7);
    } catch (InterruptedException ignore) {
      // NOTE(review): interrupt status is not restored here — confirm intent
    }
    long _counters2 = putCount + missCount + hitCount + removeCount + evictCount;
    if (_counters2 != _counters) {
      throw new IllegalStateException("detected operations while clearing.");
    }
  }

  /**
   * When no entry is in the storage or when clear is
   * called, then remove all files from the filesystem.
   */
  private void removeFiles() {
    boolean _ignore;
    for (int i = 0; i < DESCRIPTOR_COUNT; i++) {
      _ignore = new File(fileName + "-" + i + ".dsc").delete();
    }
    // walk backwards from the last index file until a delete fails (file absent)
    int idx = descriptor.lastIndexFile;
    boolean _deleted;
    do {
      _deleted = new File(fileName + "-" + idx + ".idx").delete();
      idx--;
      if (idx <= 0) {
        // index file numbers wrap around, see CommitWorker.openFile()
        idx = tunable.highestIndexNumber;
      }
    } while (_deleted);
    _ignore = new File(fileName + ".img").delete();
  }

  /** (Re)map the image file into {@link #buffer}; called after every resize. */
  private void resetBufferFromFile() throws IOException {
    if (!readOnly) {
      buffer =
        file.getChannel().map(
          FileChannel.MapMode.READ_WRITE,
          0, file.length());
    } else {
      if (file == null) {
        buffer = ByteBuffer.allocate(0);
      } else {
        buffer =
          file.getChannel().map(
            FileChannel.MapMode.READ_ONLY,
            0, file.length());
      }
    }
  }

  /**
   * Close immediately without doing a commit.
   */
  public void fastClose() throws IOException {
    synchronized (valuesLock) {
      values = null;
      freeMap.init();
      buffer = null;
      file.close();
      file = null;
      justUnusedSlots = null;
      slotsToFreeQueue = null;
    }
  }

  /**
   * Look up the entry for the key and read it from the mapped buffer.
   * Returns null (and counts a miss) when not present.
   */
  public StorageEntry get(Object key)
    throws IOException, ClassNotFoundException {
    HeapEntry be;
    synchronized (valuesLock) {
      be = values.get(key);
      if (be == null) {
        missCount++;
        return null;
      }
      hitCount++;
    }
    // read outside the lock; see put() javadoc about the benign read/write race
    return returnEntry(be);
  }

  /** Check presence of the key; also counted as hit/miss. */
  public boolean contains(Object key) throws IOException {
    synchronized (valuesLock) {
      if (!values.containsKey(key)) {
        missCount++;
        return false;
      }
      hitCount++;
      return true;
    }
  }

  /** Remove the entry for the key; returns true when something was removed. */
  public boolean remove(Object key) throws IOException, ClassNotFoundException {
    HeapEntry be;
    synchronized (valuesLock) {
      be = values.remove(key);
      if (be == null) {
        return false;
      }
      reallyRemove(be);
      removeCount++;
    }
    return true;
  }

  /**
   * Called by remove and when an eviction needs to be done.
   */
  private void reallyRemove(HeapEntry be) {
    deletedBufferEntries.put(be.key, be);
    newBufferEntries.remove(be.key);
    justUnusedSlots.add(be);
  }

  /** Eviction = removal plus eviction counter. Called from removeEldestEntry. */
  private void evict(HeapEntry be) {
    reallyRemove(be);
    evictCount++;
  }

  /**
   * Materialize a disk entry: read meta info and unmarshall the value (or the
   * wrapped exception) from the entry's region of the mapped buffer.
   */
  private DiskEntry returnEntry(HeapEntry be) throws IOException, ClassNotFoundException {
    // duplicate() gives an independent position/limit, so concurrent reads are safe
    ByteBuffer bb = buffer.duplicate();
    bb.position((int) be.position);
    DiskEntry e = new DiskEntry();
    e.entryExpiryTime = be.entryExpireTime;
    e.key = be.key;
    e.readMetaInfo(bb, descriptor.storageCreated);
    int _type = e.getValueTypeNumber();
    // TYPE_* constants are defined elsewhere in this file (not visible in this chunk)
    if (_type == TYPE_NULL) {
      return e;
    }
    bb.limit((int) (be.position + be.size));
    if (_type == TYPE_VALUE) {
      e.value = valueMarshaller.unmarshall(bb);
    } else {
      e.value = new ExceptionWrapper((Throwable) exceptionMarshaller.unmarshall(bb));
    }
    return e;
  }

  final static byte[] ZERO_LENGTH_BYTE_ARRAY = new byte[0];

  /**
   * Store a new entry. To achieve robustness without implementing a WAL there
   * is no update in place. Each entry gets a newly allocated space. The freed
   * space will be made available later in time.
   *
   * <p>Parallel reads on the same storage entry still read on the now
   * freed space. This is okay since the space will be reallocated later in
   * time. There is no real protection against a put and get race. However,
   * we only get in trouble if the get will need several seconds to
   * finish.</p>
   */
  public void put(StorageEntry e) throws IOException, ClassNotFoundException {
    Object o = e.getValueOrException();
    byte[] _marshalledValue = ZERO_LENGTH_BYTE_ARRAY;
    int _neededSize = 0;
    byte _type;
    if (o == null) {
      _type = TYPE_NULL;
    } else {
      if (o instanceof ExceptionWrapper) {
        _type = TYPE_EXCEPTION;
        _marshalledValue = exceptionMarshaller.marshall(((ExceptionWrapper) o).getException());
      } else if (valueMarshaller.supports(o)) {
        _type = TYPE_VALUE;
        _marshalledValue = valueMarshaller.marshall(o);
      } else {
        _type = TYPE_UNIVERSAL;
        _marshalledValue = universalMarshaller.marshall(o);
      }
      _neededSize = _marshalledValue.length;
    }
    _neededSize += DiskEntry.calculateMetaInfoSize(e, descriptor.storageCreated, _type);
    ByteBuffer bb;
    HeapEntry _newEntry;
    FreeSpaceMap.Slot s = reserveSpace(_neededSize);
    bb = buffer.duplicate();
    bb.position((int) s.position);
    DiskEntry.writeMetaInfo(bb, e, descriptor.storageCreated, _type);
    int _usedSize = (int) (bb.position() - s.position) + _marshalledValue.length;
    _newEntry = new HeapEntry(e.getKey(), s.position, _usedSize, e.getEntryExpiryTime());
    if (s.size != _usedSize) {
      // return the unused remainder of the slot to the free map
      s.size -= _usedSize;
      s.position += _usedSize;
      synchronized (freeMap) {
        freeMap.put(s);
      }
    }
    bb.put(_marshalledValue);
    synchronized (valuesLock) {
      HeapEntry be = values.get(e.getKey());
      if (be != null) {
        // space of the replaced entry is freed later, see class javadoc above
        justUnusedSlots.add(be);
      }
      deletedBufferEntries.remove(e.getKey());
      newBufferEntries.put(e.getKey(), _newEntry);
      values.put(e.getKey(), _newEntry);
      putCount++;
    }
  }

  /** Sum of the on-disk sizes of the given entries; 0 for null. */
  long calcSize(Collection<HeapEntry> set) {
    long v = 0;
    if (set != null) {
      for (HeapEntry e: set) {
        v += e.size;
      }
    }
    return v;
  }

  /** Space occupied by slots that are queued to be freed but not yet in the free map. */
  long calculateSpaceToFree() {
    long s = justUnusedSlots.getSpaceToFree();
    for (SlotBucket b : slotsToFreeQueue) {
      s += b.getSpaceToFree();
    }
    return s;
  }

  /** Live entry space plus space still pending to be freed. */
  long calculateUsedSpace() {
    long s = 0;
    s += calcSize(values.values());
    s += calculateSpaceToFree();
    return s;
  }

  @Override
  public int getEntryCount() {
    synchronized (valuesLock) {
      return values.size();
    }
  }

  public long getFreeSpace() {
    synchronized (freeMap) {
      return freeMap.getFreeSpace();
    }
  }

  /** Total capacity of the mapped image file. */
  public long getTotalValueSpace() {
    return buffer.capacity();
  }

  /** Number of modifications not yet written by flush/commit. */
  public int getUncommittedEntryCount() {
    return newBufferEntries.size() + deletedBufferEntries.size();
  }

  /**
   * Flag if there was a problem at the last startup and probably some data was lost.
   * This is an indicator for an unclean shutdown, crash, etc.
   */
  public boolean isDataLost() {
    return dataLost;
  }

  /**
   * Called when there is no more space available. Allocates new space and
   * returns the area in a free slot. The free slot needs to be inserted
   * in the maps by the caller.
   */
  Slot reserveSpace(int _neededSpace) throws IOException {
    synchronized (freeMap) {
      Slot s = freeMap.findFree(_neededSpace);
      if (s != null) {
        return s;
      }
      if (readOnly) {
        throw new ReadOnlyBufferException();
      }
      long _length = file.length();
      // try to extend an existing free slot at the end of the file
      s = freeMap.reserveSlotEndingAt(_length);
      if (s != null) {
        _neededSpace -= s.size;
        s.size += _neededSpace;
      } else {
        s = new Slot(_length, _neededSpace);
      }
      if (tunable.extensionSize >= 2) {
        // round the extension up to a multiple of extensionSize
        s.size += tunable.extensionSize - 1;
        s.size -= s.size % tunable.extensionSize;
      }
      file.setLength(s.getNextPosition());
      resetBufferFromFile();
      return s;
    }
  }

  /** Read the key index files and rebuild the free space map. */
  void readIndex() throws IOException, ClassNotFoundException {
    KeyIndexReader r = new KeyIndexReader();
    r.readKeyIndex();
    recalculateFreeSpaceMapAndRemoveDeletedEntries();
  }

  /** Read both descriptor files and return the intact one with the highest version. */
  BufferDescriptor readLatestIntactBufferDescriptor() throws IOException, ClassNotFoundException {
    BufferDescriptor bd = null;
    for (int i = 0; i < DESCRIPTOR_COUNT; i++) {
      try {
        BufferDescriptor bd2 = readDescriptor(i);
        if (bd2 != null && (bd == null || bd.descriptorVersion < bd2.descriptorVersion)) {
          bd = bd2;
        }
      } catch (IOException ex) {
        // a broken descriptor copy is expected after a crash; try the other one
      }
    }
    return bd;
  }

  /**
   * Read and verify one descriptor file. Returns null when the file is missing,
   * the magic does not match or the checksum is wrong.
   */
  BufferDescriptor readDescriptor(int idx) throws IOException, ClassNotFoundException {
    File f = new File(fileName + "-" + idx + ".dsc");
    if (!f.exists()) {
      return null;
    }
    RandomAccessFile raf = new RandomAccessFile(f, "r");
    try {
      for (int i = 0; i < DESCRIPTOR_MAGIC.length(); i++) {
        if (DESCRIPTOR_MAGIC.charAt(i) != raf.read()) {
          return null;
        }
      }
      byte[] _checkSumFirstBytes = new byte[CHECKSUM_BYTES];
      raf.read(_checkSumFirstBytes);
      byte[] _serializedDescriptorObject = new byte[(int) (raf.length() - raf.getFilePointer())];
      raf.read(_serializedDescriptorObject);
      byte[] _refSum = calcCheckSum(_serializedDescriptorObject);
      for (int i = 0; i < CHECKSUM_BYTES; i++) {
        if (_checkSumFirstBytes[i] != _refSum[i]) {
          return null;
        }
      }
      return
        (BufferDescriptor) DESCRIPTOR_MARSHALLER.unmarshall(_serializedDescriptorObject);
    } finally {
      raf.close();
    }
  }

  /**
   * Write the descriptor to the next of the two alternating descriptor files
   * (magic + checksum + serialized descriptor), then bump the version.
   */
  void writeDescriptor() throws IOException {
    int idx = (int) (descriptor.descriptorVersion % DESCRIPTOR_COUNT);
    RandomAccessFile raf = new RandomAccessFile(fileName + "-" + idx + ".dsc", "rw");
    raf.setLength(0);
    for (int i = 0; i < DESCRIPTOR_MAGIC.length(); i++) {
      raf.write(DESCRIPTOR_MAGIC.charAt(i));
    }
    byte[] _serializedDescriptorObject = DESCRIPTOR_MARSHALLER.marshall(descriptor);
    byte[] _checkSum = calcCheckSum(_serializedDescriptorObject);
    raf.write(_checkSum, 0, CHECKSUM_BYTES);
    raf.write(_serializedDescriptorObject);
    raf.close();
    descriptor.descriptorVersion++;
  }

  /**
   * Recalculate the slots of empty space, by iterating over all buffer entries
   * and cutting out the allocated areas
   */
  void recalculateFreeSpaceMapAndRemoveDeletedEntries() {
    synchronized (freeMap) {
      HashSet<Object> _deletedKey = new HashSet<Object>();
      for (HeapEntry e : values.values()) {
        if (e.position < 0) {
          // negative position marks a deletion record read from the index
          _deletedKey.add(e.key);
          continue;
        }
        freeMap.allocateSpace(e.position, e.size);
      }
      for (Object k : _deletedKey) {
        values.remove(k);
      }
    }
  }

  /**
   * Write key to object index to disk for all modified entries. The implementation only works
   * single threaded.
   *
   * @throws IOException
   */
  public void flush(FlushContext ctx, long now) throws IOException {
    synchronized (commitLock) {
      byte _earliestIndexBefore = descriptor.earliestIndexFile;
      if (isClosed()) {
        throw new IllegalStateException("storage closed");
      }
      CommitWorker _worker;
      synchronized (valuesLock) {
        if (newBufferEntries.size() == 0 && deletedBufferEntries.size() == 0) {
          return;
        }
        // take ownership of the uncommitted state and reset the collection maps
        _worker = new CommitWorker();
        _worker.timestamp = now;
        _worker.newEntries = newBufferEntries;
        _worker.deletedEntries = deletedBufferEntries;
        _worker.workerFreeSlots = justUnusedSlots;
        justUnusedSlots = new SlotBucket();
        newBufferEntries = new HashMap<Object, HeapEntry>();
        deletedBufferEntries = new HashMap<Object, HeapEntry>();
        descriptor.entryCount = getEntryCount();
        descriptor.writtenTime = now;
      }
      // force entry data to disk before the index referencing it is written
      file.getChannel().force(false);
      _worker.write();
      if (descriptor.keyMarshallerParameters == null) {
        // first flush: persist the marshaller configuration
        descriptor.keyMarshallerParameters = keyMarshaller.getFactoryParameters();
        descriptor.valueMarshallerParameters = valueMarshaller.getFactoryParameters();
        descriptor.exceptionMarshallerParameters = exceptionMarshaller.getFactoryParameters();
        descriptor.keyType = context.getKeyType().getName();
        descriptor.valueType = context.getValueType().getName();
      }
      writeDescriptor();
      _worker.freeSpace();
      if (_earliestIndexBefore >= 0 &&
        _earliestIndexBefore != descriptor.earliestIndexFile) {
        // the earliest index file became redundant, remove it
        boolean _ignore = new File(generateIndexFileName(_earliestIndexBefore)).delete();
      }
      truncateFile();
    }
  }

  public boolean isClosed() {
    return file == null;
  }

  /**
   * Don't write out the oldest entries that we also have in our updated lists.
   */
  static void sortOut(Map<Object, HeapEntry> map, Set<Object> _keys) {
    for (Object k: _keys) {
      map.remove(k);
    }
  }

  /**
   * Performs one commit: writes the modified entries as a chunk to the index
   * file, maintains the index rewrite bookkeeping and releases aged-out slots
   * back to the free map. Runs under commitLock, see flush().
   */
  class CommitWorker {

    long timestamp;
    RandomAccessFile randomAccessFile;
    HashMap<Object, HeapEntry> newEntries;
    HashMap<Object, HeapEntry> deletedEntries;
    HashMap<Object, HeapEntry> rewriteEntries = new HashMap<Object, HeapEntry>();
    SlotBucket workerFreeSlots;
    byte indexFileNo;
    long position;

    boolean forceNewFile = false;

    /** Write the index chunk and update the descriptor's index bookkeeping. */
    void write() throws IOException {
      indexFileNo = descriptor.lastIndexFile;
      checkForEntriesToRewrite();
      checkStartNewIndex();
      if (forceNewFile) {
        // entries of the current file now belong to an "earlier" index file
        for (HeapEntry e : committedEntries.values()) {
          e.indexFileNumber = indexFileNo;
          entriesInEarlierIndex.put(e.key, e);
        }
        committedEntries = new HashMap<Object, HeapEntry>();
        descriptor.indexEntries = 0;
      }
      try {
        openFile();
        writeIndexChunk();
      } finally {
        if (randomAccessFile != null) {
          randomAccessFile.close();
        }
      }
      updateCommittedEntries();
      sortOut(entriesInEarlierIndex, committedEntries.keySet());
      descriptor.indexEntries += totalEntriesToWrite();
      descriptor.lastKeyIndexPosition = position;
      descriptor.lastIndexFile = indexFileNo;
      Iterator<HeapEntry> it = entriesInEarlierIndex.values().iterator();
      if (it.hasNext()) {
        HeapEntry _earliestEntry = it.next();
        descriptor.earliestIndexFile = _earliestEntry.indexFileNumber;
      } else {
        descriptor.earliestIndexFile = indexFileNo;
      }
    }

    private int totalEntriesToWrite() {
      return newEntries.size() + deletedEntries.size() + rewriteEntries.size();
    }

    /** Append one chunk: chunk descriptor followed by the marshalled entries. */
    void writeIndexChunk() throws IOException {
      IndexChunkDescriptor d = new IndexChunkDescriptor();
      d.lastIndexFile = descriptor.lastIndexFile;
      d.lastKeyIndexPosition = descriptor.lastKeyIndexPosition;
      d.elementCount = totalEntriesToWrite();
      d.write(randomAccessFile);
      FileOutputStream out = new FileOutputStream(randomAccessFile.getFD());
      ObjectOutput oos = keyMarshaller.startOutput(out);
      for (HeapEntry e : newEntries.values()) {
        e.write(oos);
      }
      for (HeapEntry e : deletedEntries.values()) {
        e.writeDeleted(oos);
      }
      for (HeapEntry e : rewriteEntries.values()) {
        e.write(oos);
      }
      oos.close();
      out.close();
    }

    /** Open the current index file for append or start a new (possibly wrapped) one. */
    void openFile() throws IOException {
      if (indexFileNo == -1 || forceNewFile) {
        position = 0;
        if (indexFileNo == tunable.highestIndexNumber) {
          // file numbers wrap around after the configured maximum
          indexFileNo = 0;
        } else {
          indexFileNo++;
        }
        String _name = generateIndexFileName(indexFileNo);
        randomAccessFile = new RandomAccessFile(_name, "rw");
        randomAccessFile.seek(0);
        randomAccessFile.setLength(0);
      } else {
        String _name = generateIndexFileName(indexFileNo);
        randomAccessFile = new RandomAccessFile(_name, "rw");
        position = randomAccessFile.length();
        randomAccessFile.seek(position);
      }
    }

    /**
     * Partially/fully rewrite the entries within {@link #entriesInEarlierIndex}
     * to be able to remove the earliest index file in the future.
     */
    private void checkForEntriesToRewrite() {
      if (entriesInEarlierIndex.size() > 0) {
        sortOut(entriesInEarlierIndex, newEntries.keySet());
        sortOut(entriesInEarlierIndex, deletedEntries.keySet());
        int _writeCnt = newEntries.size() + deletedEntries.size();
        if (_writeCnt * tunable.rewriteCompleteFactor >= entriesInEarlierIndex.size()) {
          // few old entries left: rewrite all of them in this commit
          rewriteEntries = entriesInEarlierIndex;
          entriesInEarlierIndex = createEarlierIndexEntryHash();
        } else {
          // rewrite only a proportional portion of the old entries
          rewriteEntries = new HashMap<Object, HeapEntry>();
          int cnt = _writeCnt * tunable.rewritePartialFactor;
          Iterator<HeapEntry> it = entriesInEarlierIndex.values().iterator();
          while (cnt > 0 && it.hasNext()) {
            HeapEntry e = it.next();
            rewriteEntries.put(e.key, e);
            cnt--;
          }
          sortOut(entriesInEarlierIndex, rewriteEntries.keySet());
        }
      }
    }

    /**
     * Should we start writing a new index file?
     */
    void checkStartNewIndex() {
      int _totalEntriesInIndexFile = descriptor.indexEntries + totalEntriesToWrite();
      if (_totalEntriesInIndexFile > descriptor.entryCount * tunable.indexFileFactor) {
        forceNewFile = true;
      }
    }

    /** Record what this commit wrote; deletions get a marker entry with size -1. */
    void updateCommittedEntries() {
      committedEntries.putAll(newEntries);
      for (Object k : deletedEntries.keySet()) {
        committedEntries.put(k, new HeapEntry(k, 0, -1, 0));
      }
      committedEntries.putAll(rewriteEntries);
    }

    /**
     * Free the used space.
     */
    void freeSpace() {
      workerFreeSlots.time = timestamp;
      slotsToFreeQueue.add(workerFreeSlots);
      // release buckets whose delay has elapsed; freeSpaceAfterMillis keeps slots
      // unavailable long enough that in-flight reads on old positions finish
      SlotBucket b = slotsToFreeQueue.peek();
      long _freed = 0;
      // NOTE(review): peek() after remove() may return null if the queue drains;
      // the loop then NPEs on b.time — confirm the queue is never emptied here
      while ((b.time + tunable.freeSpaceAfterMillis) <= timestamp) {
        b = slotsToFreeQueue.remove();
        synchronized (freeMap) {
          for (Slot s : b) {
            freeMap.freeSpace(s);
            _freed += s.getSize();
          }
        }
        b = slotsToFreeQueue.peek();
      }
      freedLastCommit = _freed;
    }

  }

  /** Access-ordered map, so iteration yields the least recently touched entries first. */
  private LinkedHashMap<Object, HeapEntry> createEarlierIndexEntryHash() {
    return new LinkedHashMap<Object, HeapEntry>(8, 0.75F, true);
  }

  String generateIndexFileName(byte _fileNo) {
    return fileName + "-" + _fileNo + ".idx";
  }

  /**
   * Calculates an sha1 checksum used for the descriptor. Expected
   * to be always at least {@link #CHECKSUM_BYTES} bytes long.
   */
  byte[] calcCheckSum(byte[] ba) throws IOException {
    try {
      MessageDigest md = MessageDigest.getInstance("sha1");
      byte[] out = md.digest(ba);
      return out;
    } catch (NoSuchAlgorithmException ex) {
      throw new IOException("sha1 missing, never happens?!");
    }
  }

  /**
   * Visit all entries matching the filter. The entry snapshot is taken under
   * valuesLock; the actual reads are submitted to the context's executor.
   */
  @Override
  public void visit(final VisitContext ctx, final EntryFilter f, final EntryVisitor v) throws Exception {
    ArrayList<HeapEntry> _allEntries;
    synchronized (valuesLock) {
      _allEntries = new ArrayList<HeapEntry>(values.size());
      for (HeapEntry e : values.values()) {
        if (f == null || f.shouldInclude(e.key)) {
          _allEntries.add(e);
        }
      }
    }
    ExecutorService ex = ctx.getExecutorService();
    for (HeapEntry e : _allEntries) {
      if (ctx.shouldStop()) {
        break;
      }
      final HeapEntry be = e;
      Callable<Void> r = new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          v.visit(returnEntry(be));
          return null;
        }
      };
      ex.submit(r);
    }
  }

  /** Update the expiry time on the in-heap entry only; persisted on the next flush. */
  @Override
  public void updateEntryExpireTime(Object key, long _millis) throws Exception {
    synchronized (valuesLock) {
      HeapEntry e = values.get(key);
      if (e != null) {
        e.entryExpireTime = _millis;
      }
    }
  }

  /**
   * Truncate the file, if there is a trailing free slot. There are no compactions.
   * This is not perfect, but good enough. There should always some fluctuations
   * in the entries so at some time there will be free space at the end of the file.
   */
  private void truncateFile() throws IOException {
    Slot s;
    synchronized (freeMap) {
      s = freeMap.getHighestSlot();
      if (s != null && s.getNextPosition() == file.length()) {
        freeMap.allocateSpace(s);
        context.getLog().info("Truncating file from size " + file.length() + " to " + s.getPosition());
        file.setLength(s.getPosition());
        file.getChannel().force(true);
        resetBufferFromFile();
      }
    }
  }


  public long getPutCnt() { return putCount; }

  /**
   * Prints out current state. The output is not necessarily consistent, because
   * no lock is used.
   */
  @Override
  public String toString() {
    if (isClosed()) {
      return "DirectFileStorage(fileName=" + fileName + ", CLOSED)";
    }
    FreeSpaceMap _freeMapCopy = freeMap;
    Map<Object, HeapEntry> _valuesCopy = values;
    if (_freeMapCopy == null || _valuesCopy == null) {
      return "DirectFileStorage(fileName=" + fileName + ", UNKOWN)";
    }
    long _spaceToFree;
    long _totalValueSpace;
    synchronized (commitLock) {
      synchronized (valuesLock) {
        _spaceToFree = calculateSpaceToFree();
        _totalValueSpace = getTotalValueSpace();
      }
    }
    long _freeSpace;
    long _freeSlots;
    long _largestSlot;
    long _smallestSlot;
    synchronized (_freeMapCopy) {
      _freeSpace = _freeMapCopy.getFreeSpace();
      _freeSlots = _freeMapCopy.getSlotCount();
      _largestSlot = _freeMapCopy.getSizeOfLargestSlot();
      _smallestSlot = _freeMapCopy.getSizeOfSmallestSlot();
    }
    return "DirectFileStorage(fileName=" + fileName + ", " +
      "entryCapacity=" + entryCapacity + ", " +
      "entryCnt=" + _valuesCopy.size() + ", " +
      "totalSpace=" + getTotalValueSpace() + ", " +
      "usedSpace=" + (_totalValueSpace - _freeSpace) + ", " +
      "freeSpace=" + _freeSpace + ", " +
      "spaceToFree=" + _spaceToFree + ", " +
      "freeSlots=" + _freeSlots + ", " +
      "smallestSlot=" + _smallestSlot + ", " +
      "largestSlot=" + _largestSlot + ", " +
      "hitCnt=" + hitCount + ", " +
      "missCnt=" + missCount + ", " +
      "putCnt=" + putCount + ", " +
      "evictCnt=" + evictCount + ", " +
      "removeCnt=" + removeCount + ", " +
      "bufferDescriptor=" + descriptor + ")";
  }

  /**
   * Rebuilds the in-memory key index by walking the chunk chain backwards,
   * starting at the last written chunk. Newer chunks win: a key already read
   * is skipped when it appears again in an older chunk.
   */
  class KeyIndexReader {

    byte currentlyReadingIndexFile = -1;
    RandomAccessFile randomAccessFile;
    ByteBuffer indexBuffer;
    // keys already seen; only the newest record per key is taken
    Set<Object> readKeys = new HashSet<Object>();

    void readKeyIndex() throws IOException, ClassNotFoundException {
      entriesInEarlierIndex = new LinkedHashMap<Object, HeapEntry>();
      committedEntries = new HashMap<Object, HeapEntry>();
      byte _fileNo = descriptor.lastIndexFile;
      long _keyPosition = descriptor.lastKeyIndexPosition;
      for (;;) {
        IndexChunkDescriptor d = readChunk(_fileNo, _keyPosition);
        if (readCompleted()) {
          break;
        }
        if (_fileNo != d.lastIndexFile) {
          // chain crosses into an older file: current entries become "earlier index" entries
          for (HeapEntry e : committedEntries.values()) {
            e.indexFileNumber = _fileNo;
            entriesInEarlierIndex.put(e.key, e);
          }
        }
        _fileNo = d.lastIndexFile;
        _keyPosition = d.lastKeyIndexPosition;
      }
      if (randomAccessFile != null) {
        randomAccessFile.close();
      }
      if (entriesInEarlierIndex == committedEntries) {
        entriesInEarlierIndex = new HashMap<Object, HeapEntry>();
      }
    }

    /**
     * We read until capacity limit is reached or all stored index entries
     * are read. The capacity may be lower then before.
     */
    private boolean readCompleted() {
      return
        values.size() >= descriptor.entryCount ||
        values.size() >= entryCapacity;
    }

    /** Switch to another index file and map it read-only. */
    void openFile(byte _fileNo) throws IOException {
      if (randomAccessFile != null) {
        randomAccessFile.close();
      }
      entriesInEarlierIndex = new HashMap<Object, HeapEntry>();
      randomAccessFile = new RandomAccessFile(generateIndexFileName(_fileNo), "r");
      indexBuffer = randomAccessFile.getChannel().map(FileChannel.MapMode.READ_ONLY, 0, randomAccessFile.length());
      currentlyReadingIndexFile = _fileNo;
    }

    /**
     * Read one index chunk at the given position. Throws when the chunk
     * contributed no new keys, which would mean the chain does not progress.
     */
    IndexChunkDescriptor readChunk(byte _fileNo, long _position)
      throws IOException, ClassNotFoundException {
      if (currentlyReadingIndexFile != _fileNo) {
        openFile(_fileNo);
      }
      indexBuffer.position((int) _position);
      IndexChunkDescriptor d = new IndexChunkDescriptor();
      d.read(indexBuffer);
      ObjectInput in = keyMarshaller.startInput(new ByteBufferInputStream(indexBuffer));
      int cnt = d.elementCount;
      int _readCnt = readKeys.size();
      do {
        HeapEntry e = new HeapEntry(in);
        if (!readKeys.contains(e.key)) {
          e.indexFileNumber = _fileNo;
          readKeys.add(e.key);
          entriesInEarlierIndex.put(e.key, e);
          if (!e.isDeleted()) {
            values.put(e.key, e);
          }
          if (readCompleted()) {
            break;
          }
        }
        cnt--;
      } while (cnt > 0);
      in.close();
      if (_readCnt == readKeys.size()) {
        throw new IOException("no new data, at index: " + _fileNo + "/" + _position);
      }
      return d;
    }

  }

  /**
   * Persistent storage metadata, serialized into the alternating .dsc files
   * together with a checksum. The copy with the highest descriptorVersion wins.
   */
  static class BufferDescriptor implements Serializable {

    boolean clean = false;
    byte lastIndexFile = -1;
    byte earliestIndexFile = -1;
    long lastKeyIndexPosition = -1;
    /** Count of entries in the last index */
    int indexEntries = 0;
    int entryCount = 0;
    int freeSpace = 0;
    long storageCreated;
    long descriptorVersion = 0;
    long writtenTime;

    MarshallerFactory.Parameters keyMarshallerParameters;
    MarshallerFactory.Parameters valueMarshallerParameters;
    MarshallerFactory.Parameters exceptionMarshallerParameters;

    String keyType;
    String keyMarshallerType;
    String valueType;
    String valueMarshallerType;

    @Override
    public String toString() {
      return "BufferDescriptor{" +
        "clean=" + clean +
        ", lastIndexFile=" + lastIndexFile +
        ", earliestIndexFile=" + earliestIndexFile +
        ", lastKeyIndexPosition=" + lastKeyIndexPosition +
        ", elementCount=" + entryCount +
        ", freeSpace=" + freeSpace +
        ", descriptorVersion=" + descriptorVersion +
        ", writtenTime=" + writtenTime +
        ", keyType='" + keyType + '\'' +
        ", keyMarshallerType='" + keyMarshallerType + '\'' +
        ", valueType='" + valueType + '\'' +
        ", valueMarshallerType='" + valueMarshallerType + '\'' +
        '}';
    }
  }

  /**
   * Header of one index chunk: links to the previous chunk (file number and
   * position) plus the number of entries that follow.
   */
  static class IndexChunkDescriptor {
    byte lastIndexFile;
    long lastKeyIndexPosition;
    int elementCount;

    void read(ByteBuffer buf)
{ 1147 lastIndexFile = buf.get(); 1148 lastKeyIndexPosition = buf.getLong(); 1149 elementCount = buf.getInt(); 1150 } 1151 1152 void write(DataOutput buf) throws IOException { 1153 buf.write(lastIndexFile); 1154 buf.writeLong(lastKeyIndexPosition); 1155 buf.writeInt(elementCount); 1156 } 1157 } 1158 1159 /** 1160 * Entry data kept in the java heap. 1161 */ 1162 static class HeapEntry { 1163 1164 Object key; 1165 long position; 1166 int size; // size or -1 if deleted 1167 long entryExpireTime; 1168 byte indexFileNumber = -1; 1169 1170 HeapEntry(ObjectInput in) throws IOException, ClassNotFoundException { 1171 position = in.readLong(); 1172 size = in.readInt(); 1173 key = in.readObject(); 1174 entryExpireTime = in.readLong(); 1175 } 1176 1177 HeapEntry(Object _key, long _position, int _size, long _entryExpireTime) { 1178 key = _key; 1179 position = _position; 1180 size = _size; 1181 entryExpireTime = _entryExpireTime; 1182 } 1183 1184 void write(ObjectOutput out) throws IOException { 1185 out.writeLong(position); 1186 out.writeInt(size); 1187 out.writeObject(key); 1188 out.writeLong(entryExpireTime); 1189 } 1190 1191 void writeDeleted(ObjectOutput out) throws IOException { 1192 out.writeLong(0); 1193 out.writeInt(-1); 1194 out.writeObject(key); 1195 out.writeLong(0); 1196 } 1197 1198 /** 1199 * marks if this key mapping was deleted, so later index entries should not be used. 1200 * this is never set for in-memory deleted objects. 
1201 */ 1202 boolean isDeleted() { 1203 return size < 0; 1204 } 1205 1206 @Override 1207 public String toString() { 1208 return "IndexEntry{" + 1209 "key=" + key + 1210 ", position=" + position + 1211 ", size=" + size + 1212 '}'; 1213 } 1214 } 1215 1216 final static int TYPE_MASK = 0x03; 1217 final static int TYPE_NULL = 0; 1218 /** Value is marshalled with the value marshaller */ 1219 final static int TYPE_VALUE = 1; 1220 final static int TYPE_EXCEPTION = 2; 1221 /** Value is marshalled with the universal marshaller */ 1222 final static int TYPE_UNIVERSAL = 3; 1223 final static int FLAG_HAS_VALUE_EXPIRY_TIME = 4; 1224 final static int FLAG_HAS_ENTRY_EXPIRY_TIME = 8; 1225 final static int FLAG_HAS_CREATED_OR_UPDATED = 32; 1226 1227 public static long readCompressedLong(ByteBuffer b) { 1228 short s = b.getShort(); 1229 if (s >= 0) { 1230 return s; 1231 } 1232 long v = s & 0x7fff; 1233 s = b.getShort(); 1234 if (s >= 0) { 1235 return v | (long) s << 15; 1236 } 1237 v |= ((long) s & 0x07fff) << 15; 1238 s = b.getShort(); 1239 if (s >= 0) { 1240 return v | (long) s << 30; 1241 } 1242 v |= ((long) s & 0x07fff) << 30; 1243 s = b.getShort(); 1244 if (s >= 0) { 1245 return v | (long) s << 45; 1246 } 1247 v |= ((long) s & 0x07fff) << 45; 1248 s = b.getShort(); 1249 return v | (long) s << 60; 1250 } 1251 1252 /** 1253 * Write a long as multiple short values. The msb in the short means 1254 * that there is another short coming. 1255 */ 1256 public static void writeCompressedLong(ByteBuffer b, long v) { 1257 long s = v & 0x07fff; 1258 while (s != v) { 1259 b.putShort((short) (s | 0x8000)); 1260 v >>>= 15; 1261 s = v & 0x07fff; 1262 } 1263 b.putShort((short) v); 1264 } 1265 1266 public static int calculateCompressedLongSize(long v) { 1267 int cnt = 1; 1268 long s = v & 0x07fff; 1269 while (s != v) { 1270 cnt++; 1271 v >>>= 15; 1272 s = v & 0x07fff; 1273 } 1274 return cnt << 1; 1275 } 1276 1277 /** 1278 * This object represents the data that is written and read from the disk. 
   */
  static class DiskEntry implements StorageEntry {

    Object key;
    Object value;

    // Flags byte as stored on disk: low 2 bits = value type (TYPE_MASK),
    // FLAG_HAS_* bits mark which compressed timestamps follow.
    int flags;

    /* set from the buffer entry */
    long valueExpiryTime;
    long createdOrUpdated;
    long entryExpiryTime;

    /** Value type of this entry, one of the TYPE_* constants. */
    public int getValueTypeNumber() {
      return flags & TYPE_MASK;
    }

    @Override
    public Object getKey() {
      return key;
    }

    @Override
    public Object getValueOrException() {
      return value;
    }

    @Override
    public long getCreatedOrUpdated() {
      return createdOrUpdated;
    }

    @Override
    public long getValueExpiryTime() {
      return valueExpiryTime;
    }

    @Override
    public long getEntryExpiryTime() {
      return entryExpiryTime;
    }

    /**
     * Reads the flags byte and the optional compressed timestamps.
     * Timestamps are stored as deltas: relative to createdOrUpdated when
     * that is present, otherwise relative to the time reference. Must
     * mirror {@link #writeMetaInfo} exactly.
     */
    void readMetaInfo(ByteBuffer bb, long _timeReference) {
      flags = bb.get();
      if ((flags & FLAG_HAS_CREATED_OR_UPDATED) > 0) {
        createdOrUpdated = readCompressedLong(bb) + _timeReference;
        if ((flags & FLAG_HAS_VALUE_EXPIRY_TIME) > 0) {
          valueExpiryTime = readCompressedLong(bb) + createdOrUpdated;
        }
        if ((flags & FLAG_HAS_ENTRY_EXPIRY_TIME) > 0) {
          entryExpiryTime = readCompressedLong(bb) + createdOrUpdated;
        }
        return;
      }
      if ((flags & FLAG_HAS_VALUE_EXPIRY_TIME) > 0) {
        valueExpiryTime = readCompressedLong(bb) + _timeReference;
      }
      if ((flags & FLAG_HAS_ENTRY_EXPIRY_TIME) > 0) {
        entryExpiryTime = readCompressedLong(bb) + _timeReference;
      }
    }

    /**
     * Writes the flags byte followed by the timestamps that are present
     * (non-zero), delta-compressed as described in {@link #readMetaInfo}.
     * Write order defines the on-disk format; do not reorder.
     */
    static void writeMetaInfo(ByteBuffer bb, StorageEntry e, long _timeReference, int _type) {
      int _flags =
        _type |
        (e.getEntryExpiryTime() != 0 ? FLAG_HAS_ENTRY_EXPIRY_TIME : 0) |
        (e.getCreatedOrUpdated() != 0 ? FLAG_HAS_CREATED_OR_UPDATED : 0) |
        (e.getValueExpiryTime() != 0 ? FLAG_HAS_VALUE_EXPIRY_TIME : 0) ;
      bb.put((byte) _flags);
      if ((_flags & FLAG_HAS_CREATED_OR_UPDATED) > 0) {
        writeCompressedLong(bb, e.getCreatedOrUpdated() - _timeReference);
        if ((_flags & FLAG_HAS_VALUE_EXPIRY_TIME) > 0) {
          writeCompressedLong(bb, e.getValueExpiryTime() - e.getCreatedOrUpdated());
        }
        if ((_flags & FLAG_HAS_ENTRY_EXPIRY_TIME) > 0) {
          writeCompressedLong(bb, e.getEntryExpiryTime() - e.getCreatedOrUpdated());
        }
        return;
      }
      if ((_flags & FLAG_HAS_VALUE_EXPIRY_TIME) > 0) {
        writeCompressedLong(bb, e.getValueExpiryTime() - _timeReference);
      }
      if ((_flags & FLAG_HAS_ENTRY_EXPIRY_TIME) > 0) {
        writeCompressedLong(bb, e.getEntryExpiryTime() - _timeReference);
      }
    }

    /**
     * Size in bytes that {@link #writeMetaInfo} will emit for this entry:
     * one flags byte plus the compressed sizes of the present timestamps.
     */
    static int calculateMetaInfoSize(StorageEntry e, long _timeReference, int _type) {
      int _flags =
        _type |
        (e.getEntryExpiryTime() != 0 ? FLAG_HAS_ENTRY_EXPIRY_TIME : 0) |
        (e.getValueExpiryTime() != 0 ? FLAG_HAS_VALUE_EXPIRY_TIME : 0) |
        (e.getCreatedOrUpdated() != 0 ? FLAG_HAS_CREATED_OR_UPDATED : 0);
      int cnt = 1;
      if ((_flags & FLAG_HAS_CREATED_OR_UPDATED) > 0) {
        cnt += calculateCompressedLongSize(e.getCreatedOrUpdated() - _timeReference);
        if ((_flags & FLAG_HAS_VALUE_EXPIRY_TIME) > 0) {
          cnt += calculateCompressedLongSize(e.getValueExpiryTime() - e.getCreatedOrUpdated());
        }
        if ((_flags & FLAG_HAS_ENTRY_EXPIRY_TIME) > 0) {
          cnt += calculateCompressedLongSize(e.getEntryExpiryTime() - e.getCreatedOrUpdated());
        }
        return cnt;
      }
      if ((_flags & FLAG_HAS_VALUE_EXPIRY_TIME) > 0) {
        cnt += calculateCompressedLongSize(e.getValueExpiryTime() - _timeReference);
      }
      if ((_flags & FLAG_HAS_ENTRY_EXPIRY_TIME) > 0) {
        cnt += calculateCompressedLongSize(e.getEntryExpiryTime() - _timeReference);
      }
      return cnt;
    }

    public String toString() {
      return
        "DiskEntry(key=\"" + key + "\"" + ", " +
        "valueExpiryTime=" + valueExpiryTime + ", " +
        "entryExpiryTime=" + entryExpiryTime + ")";
    }

  }

  /**
   * A batch of freed slots collected at one point in time, so their space
   * can be returned to the free map together later.
   */
  public static class SlotBucket implements Iterable<Slot> {

    long time;
    Collection<Slot> slots = new ArrayList<Slot>();

    /** Adds the space occupied by the given heap entry. */
    public void add(HeapEntry be) {
      add(be.position, be.size);
    }

    public void add(Slot s) {
      slots.add(s);
    }

    public void add(long _position, int _size) {
      add(new Slot(_position, _size));
    }

    /** Total byte count of all slots in this bucket. */
    public long getSpaceToFree() {
      long n = 0;
      for (Slot s : slots) {
        n += s.size;
      }
      return n;
    }

    @Override
    public Iterator<Slot> iterator() {
      return slots.iterator();
    }
  }

  /**
   * Some parameters factored out, which may be modified if needed.
   * All these parameters have no effect on the written data format.
   * Usually there is no need to change some of the values. This
   * is basically provided for documentary reason and to have all
   * "magic values" in a central place.
1438 */ 1439 public static class Tunable extends TunableConstants { 1440 1441 /** 1442 * Factor of the entry count in the storage to limit the index 1443 * file size. After the limit a new file is started. 1444 * Old entries are rewritten time after time to make the last 1445 * file redundant and to free the disk space. 1446 */ 1447 public int indexFileFactor = 3; 1448 1449 public int rewriteCompleteFactor = 3; 1450 1451 public int rewritePartialFactor = 2; 1452 1453 public byte highestIndexNumber = 127; 1454 1455 /** 1456 * The storage is expanded by the given increment, if set to 0 it 1457 * is only expanded by the object size, each time to space is needed. 1458 * Allocating space for each object separately is a big power drain. 1459 */ 1460 public int extensionSize = 4096; 1461 1462 /** 1463 * Time after unused space is finally freed and maybe reused. 1464 * We cannot reuse space immediately or do an update in place, since 1465 * there may be ongoing read requests. 1466 */ 1467 public int freeSpaceAfterMillis = 15 * 1000; 1468 1469 } 1470 1471 public static class Provider 1472 extends CacheStorageProviderWithVoidConfig 1473 implements SimpleSingleFileStorage { 1474 1475 @Override 1476 public ImageFileStorage create(CacheStorageContext ctx, StorageConfiguration cfg) throws IOException { 1477 ImageFileStorage img = new ImageFileStorage(); 1478 img.open(ctx, cfg); 1479 return img; 1480 } 1481 1482 } 1483 1484}