001/**
002 * Licensed to the Apache Software Foundation (ASF) under one or more
003 * contributor license agreements.  See the NOTICE file distributed with
004 * this work for additional information regarding copyright ownership.
005 * The ASF licenses this file to You under the Apache License, Version 2.0
006 * (the "License"); you may not use this file except in compliance with
007 * the License.  You may obtain a copy of the License at
008 *
009 *      http://www.apache.org/licenses/LICENSE-2.0
010 *
011 * Unless required by applicable law or agreed to in writing, software
012 * distributed under the License is distributed on an "AS IS" BASIS,
013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
014 * See the License for the specific language governing permissions and
015 * limitations under the License.
016 */
017package org.apache.activemq.store.kahadb.disk.journal;
018
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.zip.Adler32;
import java.util.zip.Checksum;

import org.apache.activemq.store.kahadb.disk.util.LinkedNode;
import org.apache.activemq.store.kahadb.disk.util.LinkedNodeList;
import org.apache.activemq.store.kahadb.disk.util.SchedulerTimerTask;
import org.apache.activemq.store.kahadb.disk.util.Sequence;
import org.apache.activemq.util.ByteSequence;
import org.apache.activemq.util.DataByteArrayInputStream;
import org.apache.activemq.util.DataByteArrayOutputStream;
import org.apache.activemq.util.IOHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
039
040/**
041 * Manages DataFiles
042 *
043 *
044 */
045public class Journal {
    /** System property name used to opt in to the caller-buffering appender implementation. */
    public static final String CALLER_BUFFER_APPENDER = "org.apache.kahadb.journal.CALLER_BUFFER_APPENDER";
    /** When true, start() creates a CallerBufferingDataFileAppender instead of a DataFileAppender. */
    public static final boolean callerBufferAppender = Boolean.parseBoolean(System.getProperty(CALLER_BUFFER_APPENDER, "false"));

    // Upper bound accepted for a batch size read back from disk during recovery;
    // anything larger is treated as corruption (see checkBatchRecord).
    private static final int MAX_BATCH_SIZE = 32*1024*1024;

    // ITEM_HEAD_SPACE = length + type+ reserved space + SOR
    public static final int RECORD_HEAD_SPACE = 4 + 1;

    public static final byte USER_RECORD_TYPE = 1;
    public static final byte BATCH_CONTROL_RECORD_TYPE = 2;
    // Batch Control Item holds a 4 byte size of the batch and a 8 byte checksum of the batch.
    public static final byte[] BATCH_CONTROL_RECORD_MAGIC = bytes("WRITE BATCH");
    // Full control record length: head space + magic + 4 byte batch size + 8 byte checksum.
    public static final int BATCH_CONTROL_RECORD_SIZE = RECORD_HEAD_SPACE+BATCH_CONTROL_RECORD_MAGIC.length+4+8;
    // Pre-serialized prefix (size + type byte + magic) shared by every batch control record.
    public static final byte[] BATCH_CONTROL_RECORD_HEADER = createBatchControlRecordHeader();
060
061    private static byte[] createBatchControlRecordHeader() {
062        try {
063            DataByteArrayOutputStream os = new DataByteArrayOutputStream();
064            os.writeInt(BATCH_CONTROL_RECORD_SIZE);
065            os.writeByte(BATCH_CONTROL_RECORD_TYPE);
066            os.write(BATCH_CONTROL_RECORD_MAGIC);
067            ByteSequence sequence = os.toByteSequence();
068            sequence.compact();
069            return sequence.getData();
070        } catch (IOException e) {
071            throw new RuntimeException("Could not create batch control record header.", e);
072        }
073    }
074
    public static final String DEFAULT_DIRECTORY = ".";
    public static final String DEFAULT_ARCHIVE_DIRECTORY = "data-archive";
    public static final String DEFAULT_FILE_PREFIX = "db-";
    public static final String DEFAULT_FILE_SUFFIX = ".log";
    public static final int DEFAULT_MAX_FILE_LENGTH = 1024 * 1024 * 32;
    public static final int DEFAULT_CLEANUP_INTERVAL = 1000 * 30;
    // Amount subtracted from maxFileLength to derive preferedFileLength (see start()).
    public static final int PREFERED_DIFF = 1024 * 512;
    public static final int DEFAULT_MAX_WRITE_BATCH_SIZE = 1024 * 1024 * 4;

    private static final Logger LOG = LoggerFactory.getLogger(Journal.class);

    // Writes handed to the appender but not yet fully persisted, keyed by
    // (data file id, offset) — see WriteKey/WriteCommand below.
    protected final Map<WriteKey, WriteCommand> inflightWrites = new ConcurrentHashMap<WriteKey, WriteCommand>();

    protected File directory = new File(DEFAULT_DIRECTORY);
    protected File directoryArchive;
    // true once setDirectoryArchive() was called; stops getDirectoryArchive()
    // from lazily deriving the archive dir from the journal directory.
    private boolean directoryArchiveOverridden = false;

    protected String filePrefix = DEFAULT_FILE_PREFIX;
    protected String fileSuffix = DEFAULT_FILE_SUFFIX;
    protected boolean started;

    protected int maxFileLength = DEFAULT_MAX_FILE_LENGTH;
    protected int preferedFileLength = DEFAULT_MAX_FILE_LENGTH - PREFERED_DIFF;
    protected int writeBatchSize = DEFAULT_MAX_WRITE_BATCH_SIZE;

    protected FileAppender appender;
    protected DataFileAccessorPool accessorPool;

    // Three views over the same set of data files: by numeric id, by File,
    // and as an ordered linked list (oldest first; tail is the write file).
    protected Map<Integer, DataFile> fileMap = new HashMap<Integer, DataFile>();
    protected Map<File, DataFile> fileByFileMap = new LinkedHashMap<File, DataFile>();
    protected LinkedNodeList<DataFile> dataFiles = new LinkedNodeList<DataFile>();

    protected final AtomicReference<Location> lastAppendLocation = new AtomicReference<Location>();
    protected Runnable cleanupTask;
    // Sum of the lengths of all data files; may be replaced by an externally
    // shared accumulator via setSizeAccumulator().
    protected AtomicLong totalLength = new AtomicLong();
    protected boolean archiveDataLogs;
    private ReplicationTarget replicationTarget;
    protected boolean checksum;
    protected boolean checkForCorruptionOnStartup;
    protected boolean enableAsyncDiskSync = true;
    private Timer timer;

    /** Callback invoked after a data file has been archived or deleted by the journal. */
    public interface DataFileRemovedListener {
        void fileRemoved(DataFile datafile);
    }

    private DataFileRemovedListener dataFileRemovedListener;
122
    /**
     * Opens the journal: scans the directory for existing data files, links
     * them together in id order, optionally recovery-checks them, ensures a
     * current write file exists, and schedules the periodic cleanup task.
     * Subsequent calls while started are no-ops.
     *
     * @throws IOException on failure to read or recover the data files
     */
    public synchronized void start() throws IOException {
        if (started) {
            return;
        }

        long start = System.currentTimeMillis();
        accessorPool = new DataFileAccessorPool(this);
        started = true;
        // Keep preferedFileLength positive even for very small maxFileLength values.
        preferedFileLength = Math.max(PREFERED_DIFF, getMaxFileLength() - PREFERED_DIFF);

        appender = callerBufferAppender ? new CallerBufferingDataFileAppender(this) : new DataFileAppender(this);

        File[] files = directory.listFiles(new FilenameFilter() {
            public boolean accept(File dir, String n) {
                return dir.equals(directory) && n.startsWith(filePrefix) && n.endsWith(fileSuffix);
            }
        });

        if (files != null) {
            for (File file : files) {
                try {
                    // The numeric data file id sits between the prefix and suffix.
                    String n = file.getName();
                    String numStr = n.substring(filePrefix.length(), n.length()-fileSuffix.length());
                    int num = Integer.parseInt(numStr);
                    DataFile dataFile = new DataFile(file, num, preferedFileLength);
                    fileMap.put(dataFile.getDataFileId(), dataFile);
                    totalLength.addAndGet(dataFile.getLength());
                } catch (NumberFormatException e) {
                    // Ignore file that do not match the pattern.
                }
            }

            // Sort the list so that we can link the DataFiles together in the
            // right order.
            List<DataFile> l = new ArrayList<DataFile>(fileMap.values());
            Collections.sort(l);
            for (DataFile df : l) {
                if (df.getLength() == 0) {
                    // possibly the result of a previous failed write
                    LOG.info("ignoring zero length, partially initialised journal data file: " + df);
                    continue;
                }
                dataFiles.addLast(df);
                fileByFileMap.put(df.getFile(), df);

                if( isCheckForCorruptionOnStartup() ) {
                    // Runs for every file; the final iteration leaves the tail's
                    // recovered end position as the append location.
                    lastAppendLocation.set(recoveryCheck(df));
                }
            }
        }

        getCurrentWriteFile();

        if( lastAppendLocation.get()==null ) {
            // No per-file corruption check was done above, so recover just the
            // tail file to find where appending should resume.
            DataFile df = dataFiles.getTail();
            lastAppendLocation.set(recoveryCheck(df));
        }

        cleanupTask = new Runnable() {
            public void run() {
                cleanup();
            }
        };
        this.timer = new Timer("KahaDB Scheduler", true);
        TimerTask task = new SchedulerTimerTask(cleanupTask);
        this.timer.scheduleAtFixedRate(task, DEFAULT_CLEANUP_INTERVAL,DEFAULT_CLEANUP_INTERVAL);
        long end = System.currentTimeMillis();
        LOG.trace("Startup took: "+(end-start)+" ms");
    }
192
193    private static byte[] bytes(String string) {
194        try {
195            return string.getBytes("UTF-8");
196        } catch (UnsupportedEncodingException e) {
197            throw new RuntimeException(e);
198        }
199    }
200
    /**
     * Scans a data file from offset 0 forward, validating each write batch, to
     * determine where valid data ends. Corrupt stretches followed by another
     * valid batch are recorded in {@code dataFile.corruptedBlocks}; the file
     * length is trimmed to the end of the last good batch.
     *
     * @param dataFile the data file to scan
     * @return the location (file id + offset) just past the last valid batch
     */
    protected Location recoveryCheck(DataFile dataFile) throws IOException {
        Location location = new Location();
        location.setDataFileId(dataFile.getDataFileId());
        location.setOffset(0);

        DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
        try {
            while( true ) {
                int size = checkBatchRecord(reader, location.getOffset());
                if ( size>=0 ) {
                    // Valid batch: skip the control record plus its payload.
                    location.setOffset(location.getOffset()+BATCH_CONTROL_RECORD_SIZE+size);
                } else {

                    // Perhaps it's just some corruption... scan through the file to find the next valid batch record.  We
                    // may have subsequent valid batch records.
                    int nextOffset = findNextBatchRecord(reader, location.getOffset()+1);
                    if( nextOffset >=0 ) {
                        Sequence sequence = new Sequence(location.getOffset(), nextOffset - 1);
                        LOG.info("Corrupt journal records found in '"+dataFile.getFile()+"' between offsets: "+sequence);
                        dataFile.corruptedBlocks.add(sequence);
                        location.setOffset(nextOffset);
                    } else {
                        break;
                    }
                }
            }

        } catch (IOException e) {
            // Deliberately swallowed: a short read / EOF simply ends the scan,
            // leaving location at the last offset that validated cleanly.
        } finally {
            accessorPool.closeDataFileAccessor(reader);
        }

        int existingLen = dataFile.getLength();
        dataFile.setLength(location.getOffset());
        if (existingLen > dataFile.getLength()) {
            // File was logically truncated to the recovered end; fix the total.
            totalLength.addAndGet(dataFile.getLength() - existingLen);
        }

        if( !dataFile.corruptedBlocks.isEmpty() ) {
            // Is the end of the data file corrupted?
            if( dataFile.corruptedBlocks.getTail().getLast()+1 == location.getOffset() ) {
                // Drop a trailing corrupt region entirely rather than keep it.
                dataFile.setLength((int) dataFile.corruptedBlocks.removeLastSequence().getFirst());
            }
        }

        return location;
    }
248
    /**
     * Searches forward from {@code offset} for the next occurrence of the
     * batch control record header, reading the file in 4KB chunks.
     *
     * @return the absolute offset of the next header, or -1 when EOF is
     *         reached without finding one
     */
    private int findNextBatchRecord(DataFileAccessor reader, int offset) throws IOException {
        ByteSequence header = new ByteSequence(BATCH_CONTROL_RECORD_HEADER);
        byte data[] = new byte[1024*4];
        ByteSequence bs = new ByteSequence(data, 0, reader.read(offset, data));

        int pos = 0;
        while( true ) {
            pos = bs.indexOf(header, pos);
            if( pos >= 0 ) {
                return offset+pos;
            } else {
                // need to load the next data chunck in..
                if( bs.length != data.length ) {
                    // If we had a short read then we were at EOF
                    return -1;
                }
                // Overlap consecutive reads by the header length so a header
                // straddling a chunk boundary is still found.
                offset += bs.length-BATCH_CONTROL_RECORD_HEADER.length;
                bs = new ByteSequence(data, 0, reader.read(offset, data));
                pos=0;
            }
        }
    }
271
272
273    public int checkBatchRecord(DataFileAccessor reader, int offset) throws IOException {
274        byte controlRecord[] = new byte[BATCH_CONTROL_RECORD_SIZE];
275        DataByteArrayInputStream controlIs = new DataByteArrayInputStream(controlRecord);
276
277        reader.readFully(offset, controlRecord);
278
279        // Assert that it's  a batch record.
280        for( int i=0; i < BATCH_CONTROL_RECORD_HEADER.length; i++ ) {
281            if( controlIs.readByte() != BATCH_CONTROL_RECORD_HEADER[i] ) {
282                return -1;
283            }
284        }
285
286        int size = controlIs.readInt();
287        if( size > MAX_BATCH_SIZE ) {
288            return -1;
289        }
290
291        if( isChecksum() ) {
292
293            long expectedChecksum = controlIs.readLong();
294            if( expectedChecksum == 0 ) {
295                // Checksuming was not enabled when the record was stored.
296                // we can't validate the record :(
297                return size;
298            }
299
300            byte data[] = new byte[size];
301            reader.readFully(offset+BATCH_CONTROL_RECORD_SIZE, data);
302
303            Checksum checksum = new Adler32();
304            checksum.update(data, 0, data.length);
305
306            if( expectedChecksum!=checksum.getValue() ) {
307                return -1;
308            }
309
310        }
311        return size;
312    }
313
314
    /** Adds {@code size} bytes to the journal's running total length. */
    void addToTotalLength(int size) {
        totalLength.addAndGet(size);
    }

    /** @return the combined length in bytes of all journal data files */
    public long length() {
        return totalLength.get();
    }
322
    /**
     * Returns the data file currently being appended to (the list tail),
     * creating the first one if the journal is empty.
     */
    synchronized DataFile getCurrentWriteFile() throws IOException {
        if (dataFiles.isEmpty()) {
            rotateWriteFile();
        }
        return dataFiles.getTail();
    }
329
330    synchronized DataFile rotateWriteFile() {
331        int nextNum = !dataFiles.isEmpty() ? dataFiles.getTail().getDataFileId().intValue() + 1 : 1;
332        File file = getFile(nextNum);
333        DataFile nextWriteFile = new DataFile(file, nextNum, preferedFileLength);
334        // actually allocate the disk space
335        fileMap.put(nextWriteFile.getDataFileId(), nextWriteFile);
336        fileByFileMap.put(file, nextWriteFile);
337        dataFiles.addLast(nextWriteFile);
338        return nextWriteFile;
339    }
340
341    public File getFile(int nextNum) {
342        String fileName = filePrefix + nextNum + fileSuffix;
343        File file = new File(directory, fileName);
344        return file;
345    }
346
    /**
     * Looks up the DataFile that the given location belongs to.
     *
     * @throws IOException if no data file with the location's id is registered
     */
    synchronized DataFile getDataFile(Location item) throws IOException {
        Integer key = Integer.valueOf(item.getDataFileId());
        DataFile dataFile = fileMap.get(key);
        if (dataFile == null) {
            LOG.error("Looking for key " + key + " but not found in fileMap: " + fileMap);
            throw new IOException("Could not locate data file " + getFile(item.getDataFileId()));
        }
        return dataFile;
    }
356
357    synchronized File getFile(Location item) throws IOException {
358        Integer key = Integer.valueOf(item.getDataFileId());
359        DataFile dataFile = fileMap.get(key);
360        if (dataFile == null) {
361            LOG.error("Looking for key " + key + " but not found in fileMap: " + fileMap);
362            throw new IOException("Could not locate data file " + getFile(item.getDataFileId()));
363        }
364        return dataFile.getFile();
365    }
366
    /** @return the data file following the given one in the linked list, or null at the tail */
    private DataFile getNextDataFile(DataFile dataFile) {
        return dataFile.getNext();
    }
370
    /**
     * Stops the cleanup timer, closes all file handles, and clears the
     * in-memory file indexes. A no-op if the journal was never started.
     */
    public synchronized void close() throws IOException {
        if (!started) {
            return;
        }
        if (this.timer != null) {
            this.timer.cancel();
        }
        accessorPool.close();
        appender.close();
        fileMap.clear();
        fileByFileMap.clear();
        dataFiles.clear();
        lastAppendLocation.set(null);
        started = false;
    }
386
387    protected synchronized void cleanup() {
388        if (accessorPool != null) {
389            accessorPool.disposeUnused();
390        }
391    }
392
393    public synchronized boolean delete() throws IOException {
394
395        // Close all open file handles...
396        appender.close();
397        accessorPool.close();
398
399        boolean result = true;
400        for (Iterator<DataFile> i = fileMap.values().iterator(); i.hasNext();) {
401            DataFile dataFile = i.next();
402            totalLength.addAndGet(-dataFile.getLength());
403            result &= dataFile.delete();
404        }
405        fileMap.clear();
406        fileByFileMap.clear();
407        lastAppendLocation.set(null);
408        dataFiles = new LinkedNodeList<DataFile>();
409
410        // reopen open file handles...
411        accessorPool = new DataFileAccessorPool(this);
412        appender = new DataFileAppender(this);
413        return result;
414    }
415
    /**
     * Removes (archives or deletes, per archiveDataLogs) the data files with
     * the given ids, skipping the current write file and any later ones.
     */
    public synchronized void removeDataFiles(Set<Integer> files) throws IOException {
        for (Integer key : files) {
            // Can't remove the data file (or subsequent files) that is currently being written to.
            if( key >= lastAppendLocation.get().getDataFileId() ) {
                continue;
            }
            DataFile dataFile = fileMap.get(key);
            if( dataFile!=null ) {
                forceRemoveDataFile(dataFile);
            }
        }
    }
428
429    private synchronized void forceRemoveDataFile(DataFile dataFile) throws IOException {
430        accessorPool.disposeDataFileAccessors(dataFile);
431        fileByFileMap.remove(dataFile.getFile());
432        fileMap.remove(dataFile.getDataFileId());
433        totalLength.addAndGet(-dataFile.getLength());
434        dataFile.unlink();
435        if (archiveDataLogs) {
436            File directoryArchive = getDirectoryArchive();
437            if (directoryArchive.exists()) {
438                LOG.debug("Archive directory exists: {}", directoryArchive);
439            } else {
440                if (directoryArchive.isAbsolute())
441                if (LOG.isDebugEnabled()) {
442                    LOG.debug("Archive directory [{}] does not exist - creating it now",
443                            directoryArchive.getAbsolutePath());
444                }
445                IOHelper.mkdirs(directoryArchive);
446            }
447            LOG.debug("Moving data file {} to {} ", dataFile, directoryArchive.getCanonicalPath());
448            dataFile.move(directoryArchive);
449            LOG.debug("Successfully moved data file");
450        } else {
451            LOG.debug("Deleting data file: {}", dataFile);
452            if ( dataFile.delete() ) {
453                LOG.debug("Discarded data file: {}", dataFile);
454            } else {
455                LOG.warn("Failed to discard data file : {}", dataFile.getFile());
456            }
457        }
458        if (dataFileRemovedListener != null) {
459            dataFileRemovedListener.fileRemoved(dataFile);
460        }
461    }
462
463    /**
464     * @return the maxFileLength
465     */
466    public int getMaxFileLength() {
467        return maxFileLength;
468    }
469
470    /**
471     * @param maxFileLength the maxFileLength to set
472     */
473    public void setMaxFileLength(int maxFileLength) {
474        this.maxFileLength = maxFileLength;
475    }
476
477    @Override
478    public String toString() {
479        return directory.toString();
480    }
481
    /**
     * Records that {@code length} bytes were appended to the journal by an
     * external writer so the in-memory file lengths stay accurate. The write
     * must land either in the current tail file or in the file immediately
     * after it.
     *
     * @throws IOException if the location is not in the tail or tail+1 file
     */
    public synchronized void appendedExternally(Location loc, int length) throws IOException {
        DataFile dataFile = null;
        if( dataFiles.getTail().getDataFileId() == loc.getDataFileId() ) {
            // It's an update to the current log file..
            dataFile = dataFiles.getTail();
            dataFile.incrementLength(length);
        } else if( dataFiles.getTail().getDataFileId()+1 == loc.getDataFileId() ) {
            // It's an update to the next log file.
            int nextNum = loc.getDataFileId();
            File file = getFile(nextNum);
            dataFile = new DataFile(file, nextNum, preferedFileLength);
            // actually allocate the disk space
            fileMap.put(dataFile.getDataFileId(), dataFile);
            fileByFileMap.put(file, dataFile);
            dataFiles.addLast(dataFile);
        } else {
            throw new IOException("Invalid external append.");
        }
    }
501
    /**
     * Iterates forward from {@code location} (or from the very first record
     * when null) and returns the next user record, skipping any other record
     * types. Returns null at the end of the journal or when an unwritten
     * region (type 0) is reached.
     */
    public synchronized Location getNextLocation(Location location) throws IOException, IllegalStateException {

        Location cur = null;
        while (true) {
            if (cur == null) {
                if (location == null) {
                    // Start from the head of the oldest data file.
                    DataFile head = dataFiles.getHead();
                    if( head == null ) {
                        return null;
                    }
                    cur = new Location();
                    cur.setDataFileId(head.getDataFileId());
                    cur.setOffset(0);
                } else {
                    // Set to the next offset..
                    if (location.getSize() == -1) {
                        // Size unknown: probe at the same offset to fill in details.
                        cur = new Location(location);
                    } else {
                        cur = new Location(location);
                        cur.setOffset(location.getOffset() + location.getSize());
                    }
                }
            } else {
                // A non-user record was read last pass; advance past it.
                cur.setOffset(cur.getOffset() + cur.getSize());
            }

            DataFile dataFile = getDataFile(cur);

            // Did it go into the next file??
            if (dataFile.getLength() <= cur.getOffset()) {
                dataFile = getNextDataFile(dataFile);
                if (dataFile == null) {
                    return null;
                } else {
                    cur.setDataFileId(dataFile.getDataFileId().intValue());
                    cur.setOffset(0);
                }
            }

            // Load in location size and type.
            DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
            try {
                reader.readLocationDetails(cur);
            } finally {
                accessorPool.closeDataFileAccessor(reader);
            }

            if (cur.getType() == 0) {
                // Type 0 marks the end of the written data.
                return null;
            } else if (cur.getType() == USER_RECORD_TYPE) {
                // Only return user records.
                return cur;
            }
        }
    }
557
    /**
     * Variant of getNextLocation that starts from a data file identified by
     * its File.
     * NOTE(review): an unknown file makes {@code df} null, causing an NPE
     * downstream — callers are expected to pass a file from getFiles().
     */
    public synchronized Location getNextLocation(File file, Location lastLocation, boolean thisFileOnly) throws IllegalStateException, IOException {
        DataFile df = fileByFileMap.get(file);
        return getNextLocation(df, lastLocation, thisFileOnly);
    }
562
    /**
     * Iterates forward from {@code lastLocation} within the given data file
     * (or from its head when null) and returns the next record with a
     * positive type. When {@code thisFileOnly} is true the scan stops at the
     * end of that file instead of rolling into the next one.
     */
    public synchronized Location getNextLocation(DataFile dataFile, Location lastLocation, boolean thisFileOnly) throws IOException, IllegalStateException {

        Location cur = null;
        while (true) {
            if (cur == null) {
                if (lastLocation == null) {
                    DataFile head = dataFile.getHeadNode();
                    cur = new Location();
                    cur.setDataFileId(head.getDataFileId());
                    cur.setOffset(0);
                } else {
                    // Set to the next offset..
                    cur = new Location(lastLocation);
                    cur.setOffset(cur.getOffset() + cur.getSize());
                }
            } else {
                cur.setOffset(cur.getOffset() + cur.getSize());
            }

            // Did it go into the next file??
            if (dataFile.getLength() <= cur.getOffset()) {
                if (thisFileOnly) {
                    return null;
                } else {
                    dataFile = getNextDataFile(dataFile);
                    if (dataFile == null) {
                        return null;
                    } else {
                        cur.setDataFileId(dataFile.getDataFileId().intValue());
                        cur.setOffset(0);
                    }
                }
            }

            // Load in location size and type.
            DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
            try {
                reader.readLocationDetails(cur);
            } finally {
                accessorPool.closeDataFileAccessor(reader);
            }

            if (cur.getType() == 0) {
                return null;
            } else if (cur.getType() > 0) {
                // Only return user records.
                // NOTE(review): unlike getNextLocation(Location), this accepts ANY
                // positive type (including batch control records), so the comment
                // above overstates the filter — confirm intended before changing.
                return cur;
            }
        }
    }
613
614    public synchronized ByteSequence read(Location location) throws IOException, IllegalStateException {
615        DataFile dataFile = getDataFile(location);
616        DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
617        ByteSequence rc = null;
618        try {
619            rc = reader.readRecord(location);
620        } finally {
621            accessorPool.closeDataFileAccessor(reader);
622        }
623        return rc;
624    }
625
626    public Location write(ByteSequence data, boolean sync) throws IOException, IllegalStateException {
627        Location loc = appender.storeItem(data, Location.USER_TYPE, sync);
628        return loc;
629    }
630
631    public Location write(ByteSequence data, Runnable onComplete) throws IOException, IllegalStateException {
632        Location loc = appender.storeItem(data, Location.USER_TYPE, onComplete);
633        return loc;
634    }
635
636    public void update(Location location, ByteSequence data, boolean sync) throws IOException {
637        DataFile dataFile = getDataFile(location);
638        DataFileAccessor updater = accessorPool.openDataFileAccessor(dataFile);
639        try {
640            updater.updateRecord(location, data, sync);
641        } finally {
642            accessorPool.closeDataFileAccessor(updater);
643        }
644    }
645
    /** @return the directory the journal data files live in */
    public File getDirectory() {
        return directory;
    }

    /** Sets the journal directory; intended to be called before start(). */
    public void setDirectory(File directory) {
        this.directory = directory;
    }

    /** @return the data file name prefix (default "db-") */
    public String getFilePrefix() {
        return filePrefix;
    }

    public void setFilePrefix(String filePrefix) {
        this.filePrefix = filePrefix;
    }

    /** @return the live map of writes submitted to the appender but not yet persisted */
    public Map<WriteKey, WriteCommand> getInflightWrites() {
        return inflightWrites;
    }

    /** @return the location just past the last append, or null before start() */
    public Location getLastAppendLocation() {
        return lastAppendLocation.get();
    }

    public void setLastAppendLocation(Location lastSyncedLocation) {
        this.lastAppendLocation.set(lastSyncedLocation);
    }

    /**
     * Returns the archive directory, lazily defaulting to "data-archive"
     * under the journal directory unless one was set explicitly.
     */
    public File getDirectoryArchive() {
        if (!directoryArchiveOverridden && (directoryArchive == null)) {
            // create the directoryArchive relative to the journal location
            directoryArchive = new File(directory.getAbsolutePath() +
                    File.separator + DEFAULT_ARCHIVE_DIRECTORY);
        }
        return directoryArchive;
    }

    public void setDirectoryArchive(File directoryArchive) {
        directoryArchiveOverridden = true;
        this.directoryArchive = directoryArchive;
    }

    /** @return true if removed data files are moved to the archive directory instead of deleted */
    public boolean isArchiveDataLogs() {
        return archiveDataLogs;
    }

    public void setArchiveDataLogs(boolean archiveDataLogs) {
        this.archiveDataLogs = archiveDataLogs;
    }

    /** @return the id of the data file currently being written to, or null if none exist */
    synchronized public Integer getCurrentDataFileId() {
        if (dataFiles.isEmpty())
            return null;
        return dataFiles.getTail().getDataFileId();
    }
701
702    /**
703     * Get a set of files - only valid after start()
704     *
705     * @return files currently being used
706     */
707    public Set<File> getFiles() {
708        return fileByFileMap.keySet();
709    }
710
711    public synchronized Map<Integer, DataFile> getFileMap() {
712        return new TreeMap<Integer, DataFile>(fileMap);
713    }
714
715    public long getDiskSize() {
716        long tailLength=0;
717        synchronized( this ) {
718            if( !dataFiles.isEmpty() ) {
719                tailLength = dataFiles.getTail().getLength();
720            }
721        }
722
723        long rc = totalLength.get();
724
725        // The last file is actually at a minimum preferedFileLength big.
726        if( tailLength < preferedFileLength ) {
727            rc -= tailLength;
728            rc += preferedFileLength;
729        }
730        return rc;
731    }
732
    /** Sets the replication target; not used directly within this class. */
    public void setReplicationTarget(ReplicationTarget replicationTarget) {
        this.replicationTarget = replicationTarget;
    }
    public ReplicationTarget getReplicationTarget() {
        return replicationTarget;
    }

    /** @return the data file name suffix (default ".log") */
    public String getFileSuffix() {
        return fileSuffix;
    }

    public void setFileSuffix(String fileSuffix) {
        this.fileSuffix = fileSuffix;
    }

    /** @return true if batch checksums are validated during recovery (see checkBatchRecord) */
    public boolean isChecksum() {
        return checksum;
    }

    public void setChecksum(boolean checksumWrites) {
        this.checksum = checksumWrites;
    }

    /** @return true if every data file is recovery-checked during start() */
    public boolean isCheckForCorruptionOnStartup() {
        return checkForCorruptionOnStartup;
    }

    public void setCheckForCorruptionOnStartup(boolean checkForCorruptionOnStartup) {
        this.checkForCorruptionOnStartup = checkForCorruptionOnStartup;
    }

    /** Sets the write batch size in bytes (consumed by the appender — TODO confirm). */
    public void setWriteBatchSize(int writeBatchSize) {
        this.writeBatchSize = writeBatchSize;
    }

    public int getWriteBatchSize() {
        return writeBatchSize;
    }

    /**
     * Replaces the journal's size counter with an externally shared
     * accumulator so a caller can observe total store size directly.
     */
    public void setSizeAccumulator(AtomicLong storeSizeAccumulator) {
       this.totalLength = storeSizeAccumulator;
    }

    public void setEnableAsyncDiskSync(boolean val) {
        this.enableAsyncDiskSync = val;
    }

    public boolean isEnableAsyncDiskSync() {
        return enableAsyncDiskSync;
    }

    /** Registers a callback invoked after a data file is archived or deleted. */
    public void setDataFileRemovedListener(DataFileRemovedListener dataFileRemovedListener) {
        this.dataFileRemovedListener = dataFileRemovedListener;
    }
787
    /**
     * A single queued journal write: the target location, the payload bytes,
     * and either a sync flag or a completion callback (never both set).
     */
    public static class WriteCommand extends LinkedNode<WriteCommand> {
        public final Location location;
        public final ByteSequence data;
        // when true, the write was requested as a synchronous (flushed) write
        final boolean sync;
        public final Runnable onComplete;

        public WriteCommand(Location location, ByteSequence data, boolean sync) {
            this.location = location;
            this.data = data;
            this.sync = sync;
            this.onComplete = null;
        }

        public WriteCommand(Location location, ByteSequence data, Runnable onComplete) {
            this.location = location;
            this.data = data;
            this.onComplete = onComplete;
            this.sync = false;
        }
    }
808
809    public static class WriteKey {
810        private final int file;
811        private final long offset;
812        private final int hash;
813
814        public WriteKey(Location item) {
815            file = item.getDataFileId();
816            offset = item.getOffset();
817            // TODO: see if we can build a better hash
818            hash = (int)(file ^ offset);
819        }
820
821        public int hashCode() {
822            return hash;
823        }
824
825        public boolean equals(Object obj) {
826            if (obj instanceof WriteKey) {
827                WriteKey di = (WriteKey)obj;
828                return di.file == file && di.offset == offset;
829            }
830            return false;
831        }
832    }
833}