001/**
002 * Licensed to the Apache Software Foundation (ASF) under one or more
003 * contributor license agreements.  See the NOTICE file distributed with
004 * this work for additional information regarding copyright ownership.
005 * The ASF licenses this file to You under the Apache License, Version 2.0
006 * (the "License"); you may not use this file except in compliance with
007 * the License.  You may obtain a copy of the License at
008 *
009 *      http://www.apache.org/licenses/LICENSE-2.0
010 *
011 * Unless required by applicable law or agreed to in writing, software
012 * distributed under the License is distributed on an "AS IS" BASIS,
013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
014 * See the License for the specific language governing permissions and
015 * limitations under the License.
016 */
017package org.apache.activemq.store.kahadb.disk.journal;
018
import java.io.*;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.zip.Adler32;
import java.util.zip.Checksum;

import org.apache.activemq.store.kahadb.disk.util.LinkedNode;
import org.apache.activemq.store.kahadb.disk.util.LinkedNodeList;
import org.apache.activemq.store.kahadb.disk.util.SchedulerTimerTask;
import org.apache.activemq.store.kahadb.disk.util.Sequence;
import org.apache.activemq.store.kahadb.disk.util.SequenceSet;
import org.apache.activemq.util.*;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
036
037/**
038 * Manages DataFiles
039 *
040 *
041 */
042public class Journal {
    // System property (and its parsed value) selecting the caller-buffering appender implementation.
    public static final String CALLER_BUFFER_APPENDER = "org.apache.kahadb.journal.CALLER_BUFFER_APPENDER";
    public static final boolean callerBufferAppender = Boolean.parseBoolean(System.getProperty(CALLER_BUFFER_APPENDER, "false"));

    // 1 MiB unit intended for chunked journal file preallocation.
    private static final int PREALLOC_CHUNK_SIZE = 1024*1024;

    // RECORD_HEAD_SPACE = 4 byte record length + 1 byte record type
    public static final int RECORD_HEAD_SPACE = 4 + 1;

    // Record type tags stored in the one-byte type field of each record header.
    public static final byte USER_RECORD_TYPE = 1;
    public static final byte BATCH_CONTROL_RECORD_TYPE = 2;
    // Batch Control Item holds a 4 byte size of the batch and a 8 byte checksum of the batch.
    public static final byte[] BATCH_CONTROL_RECORD_MAGIC = bytes("WRITE BATCH");
    public static final int BATCH_CONTROL_RECORD_SIZE = RECORD_HEAD_SPACE+BATCH_CONTROL_RECORD_MAGIC.length+4+8;
    // Precomputed header prefix (length + type + magic) used to locate/validate batch records.
    public static final byte[] BATCH_CONTROL_RECORD_HEADER = createBatchControlRecordHeader();
057
058    // tackle corruption when checksum is disabled or corrupt with zeros, minimise data loss
059    public void corruptRecoveryLocation(Location recoveryPosition) throws IOException {
060        DataFile dataFile = getDataFile(recoveryPosition);
061        // with corruption on recovery we have no faith in the content - slip to the next batch record or eof
062        DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
063        try {
064            int nextOffset = findNextBatchRecord(reader, recoveryPosition.getOffset() + 1);
065            Sequence sequence = new Sequence(recoveryPosition.getOffset(), nextOffset >= 0 ? nextOffset - 1 : dataFile.getLength() - 1);
066            LOG.info("Corrupt journal records found in '" + dataFile.getFile() + "' between offsets: " + sequence);
067
068            // skip corruption on getNextLocation
069            recoveryPosition.setOffset((int) sequence.getLast() + 1);
070            recoveryPosition.setSize(-1);
071
072            dataFile.corruptedBlocks.add(sequence);
073
074        } catch (IOException e) {
075        } finally {
076            accessorPool.closeDataFileAccessor(reader);
077        }
078    }
079
    /** @return the pool of random-access readers/updaters for the journal's data files */
    public DataFileAccessorPool getAccessorPool() {
        return accessorPool;
    }
083
    /** How journal files are preallocated on disk before first use. */
    public enum PreallocationStrategy {
        SPARSE_FILE,
        OS_KERNEL_COPY,
        ZEROS;
    }

    /** How much of the journal is preallocated. */
    public enum PreallocationScope {
        ENTIRE_JOURNAL;
    }
093
094    private static byte[] createBatchControlRecordHeader() {
095        try {
096            DataByteArrayOutputStream os = new DataByteArrayOutputStream();
097            os.writeInt(BATCH_CONTROL_RECORD_SIZE);
098            os.writeByte(BATCH_CONTROL_RECORD_TYPE);
099            os.write(BATCH_CONTROL_RECORD_MAGIC);
100            ByteSequence sequence = os.toByteSequence();
101            sequence.compact();
102            return sequence.getData();
103        } catch (IOException e) {
104            throw new RuntimeException("Could not create batch control record header.", e);
105        }
106    }
107
    public static final String DEFAULT_DIRECTORY = ".";
    public static final String DEFAULT_ARCHIVE_DIRECTORY = "data-archive";
    public static final String DEFAULT_FILE_PREFIX = "db-";
    public static final String DEFAULT_FILE_SUFFIX = ".log";
    public static final int DEFAULT_MAX_FILE_LENGTH = 1024 * 1024 * 32;
    public static final int DEFAULT_CLEANUP_INTERVAL = 1000 * 30;
    public static final int PREFERED_DIFF = 1024 * 512;
    public static final int DEFAULT_MAX_WRITE_BATCH_SIZE = 1024 * 1024 * 4;

    private static final Logger LOG = LoggerFactory.getLogger(Journal.class);

    // Writes that have been queued on the appender but not yet forced to disk.
    protected final Map<WriteKey, WriteCommand> inflightWrites = new ConcurrentHashMap<WriteKey, WriteCommand>();

    protected File directory = new File(DEFAULT_DIRECTORY);
    protected File directoryArchive;
    // true once setDirectoryArchive() was called; stops getDirectoryArchive() computing a default
    private boolean directoryArchiveOverridden = false;

    protected String filePrefix = DEFAULT_FILE_PREFIX;
    protected String fileSuffix = DEFAULT_FILE_SUFFIX;
    protected boolean started;

    protected int maxFileLength = DEFAULT_MAX_FILE_LENGTH;
    protected int writeBatchSize = DEFAULT_MAX_WRITE_BATCH_SIZE;

    protected FileAppender appender;
    protected DataFileAccessorPool accessorPool;

    // Indexes over the journal's data files: by id, by File, and as an ordered linked list.
    protected Map<Integer, DataFile> fileMap = new HashMap<Integer, DataFile>();
    protected Map<File, DataFile> fileByFileMap = new LinkedHashMap<File, DataFile>();
    protected LinkedNodeList<DataFile> dataFiles = new LinkedNodeList<DataFile>();

    // Location of the last append; null until start() completes recovery.
    protected final AtomicReference<Location> lastAppendLocation = new AtomicReference<Location>();
    protected Runnable cleanupTask;
    // Total bytes accounted to the journal's data files (see addToTotalLength / length()).
    protected AtomicLong totalLength = new AtomicLong();
    protected boolean archiveDataLogs;
    private ReplicationTarget replicationTarget;
    protected boolean checksum;
    protected boolean checkForCorruptionOnStartup;
    protected boolean enableAsyncDiskSync = true;
    private Timer timer;

    protected PreallocationScope preallocationScope = PreallocationScope.ENTIRE_JOURNAL;
    protected PreallocationStrategy preallocationStrategy = PreallocationStrategy.SPARSE_FILE;

    /** Callback invoked after a data file has been archived or deleted. */
    public interface DataFileRemovedListener {
        void fileRemoved(DataFile datafile);
    }

    private DataFileRemovedListener dataFileRemovedListener;
157
    /**
     * Opens the journal: scans the directory for existing data files, optionally
     * runs a corruption check per file, establishes the last append location and
     * schedules the periodic accessor-pool cleanup task. No-op if already started.
     */
    public synchronized void start() throws IOException {
        if (started) {
            return;
        }

        long start = System.currentTimeMillis();
        accessorPool = new DataFileAccessorPool(this);
        started = true;

        // Appender implementation is chosen once from the system property.
        appender = callerBufferAppender ? new CallerBufferingDataFileAppender(this) : new DataFileAppender(this);

        File[] files = directory.listFiles(new FilenameFilter() {
            public boolean accept(File dir, String n) {
                return dir.equals(directory) && n.startsWith(filePrefix) && n.endsWith(fileSuffix);
            }
        });

        if (files != null) {
            for (File file : files) {
                try {
                    String n = file.getName();
                    // file id is the numeric part between prefix and suffix
                    String numStr = n.substring(filePrefix.length(), n.length()-fileSuffix.length());
                    int num = Integer.parseInt(numStr);
                    DataFile dataFile = new DataFile(file, num);
                    fileMap.put(dataFile.getDataFileId(), dataFile);
                    totalLength.addAndGet(dataFile.getLength());
                } catch (NumberFormatException e) {
                    // Ignore file that do not match the pattern.
                }
            }

            // Sort the list so that we can link the DataFiles together in the
            // right order.
            List<DataFile> l = new ArrayList<DataFile>(fileMap.values());
            Collections.sort(l);
            for (DataFile df : l) {
                if (df.getLength() == 0) {
                    // possibly the result of a previous failed write
                    LOG.info("ignoring zero length, partially initialised journal data file: " + df);
                    continue;
                }
                dataFiles.addLast(df);
                fileByFileMap.put(df.getFile(), df);

                if( isCheckForCorruptionOnStartup() ) {
                    // after the loop this holds the recovery position of the tail file
                    lastAppendLocation.set(recoveryCheck(df));
                }
            }
        }

        // ensures at least one data file exists for appending
        getCurrentWriteFile();

        if (preallocationStrategy != PreallocationStrategy.SPARSE_FILE && maxFileLength != DEFAULT_MAX_FILE_LENGTH) {
            LOG.warn("You are using a preallocation strategy and journal maxFileLength which should be benchmarked accordingly to not introduce unexpected latencies.");
        }

        if( lastAppendLocation.get()==null ) {
            // no startup corruption check ran above - recover just the tail file
            DataFile df = dataFiles.getTail();
            lastAppendLocation.set(recoveryCheck(df));
        }

        // ensure we don't report unused space of last journal file in size metric
        if (totalLength.get() > maxFileLength && lastAppendLocation.get().getOffset() > 0) {
            totalLength.addAndGet(lastAppendLocation.get().getOffset() - maxFileLength);
        }


        cleanupTask = new Runnable() {
            public void run() {
                cleanup();
            }
        };
        // daemon timer so it does not keep the JVM alive
        this.timer = new Timer("KahaDB Scheduler", true);
        TimerTask task = new SchedulerTimerTask(cleanupTask);
        this.timer.scheduleAtFixedRate(task, DEFAULT_CLEANUP_INTERVAL,DEFAULT_CLEANUP_INTERVAL);
        long end = System.currentTimeMillis();
        LOG.trace("Startup took: "+(end-start)+" ms");
    }
236
237
238    public void preallocateEntireJournalDataFile(RecoverableRandomAccessFile file) {
239
240        if (PreallocationScope.ENTIRE_JOURNAL == preallocationScope) {
241
242            if (PreallocationStrategy.OS_KERNEL_COPY == preallocationStrategy) {
243                doPreallocationKernelCopy(file);
244
245            }else if (PreallocationStrategy.ZEROS == preallocationStrategy) {
246                doPreallocationZeros(file);
247            }
248            else {
249                doPreallocationSparseFile(file);
250            }
251        }else {
252            LOG.info("Using journal preallocation scope of batch allocation");
253        }
254    }
255
    // Sparse preallocation: touch only the last byte so the OS extends the file lazily.
    private void doPreallocationSparseFile(RecoverableRandomAccessFile file) {
        try {
            file.seek(maxFileLength - 1);
            file.write((byte)0x00);
        } catch (IOException e) {
            LOG.error("Could not preallocate journal file with sparse file! Will continue without preallocation", e);
        }
    }
264
265    private void doPreallocationZeros(RecoverableRandomAccessFile file) {
266        ByteBuffer buffer = ByteBuffer.allocate(maxFileLength);
267        for (int i = 0; i < maxFileLength; i++) {
268            buffer.put((byte) 0x00);
269        }
270        buffer.flip();
271
272        try {
273            FileChannel channel = file.getChannel();
274            channel.write(buffer);
275            channel.force(false);
276            channel.position(0);
277        } catch (IOException e) {
278            LOG.error("Could not preallocate journal file with zeros! Will continue without preallocation", e);
279        }
280    }
281
282    private void doPreallocationKernelCopy(RecoverableRandomAccessFile file) {
283
284        // create a template file that will be used to pre-allocate the journal files
285        File templateFile = createJournalTemplateFile();
286
287        RandomAccessFile templateRaf = null;
288        try {
289            templateRaf = new RandomAccessFile(templateFile, "rw");
290            templateRaf.setLength(maxFileLength);
291            templateRaf.getChannel().force(true);
292            templateRaf.getChannel().transferTo(0, getMaxFileLength(), file.getChannel());
293            templateRaf.close();
294            templateFile.delete();
295        } catch (FileNotFoundException e) {
296            LOG.error("Could not find the template file on disk at " + templateFile.getAbsolutePath(), e);
297        } catch (IOException e) {
298            LOG.error("Could not transfer the template file to journal, transferFile=" + templateFile.getAbsolutePath(), e);
299        }
300    }
301
302    private File createJournalTemplateFile() {
303        String fileName = "db-log.template";
304        File rc  = new File(directory, fileName);
305        if (rc.exists()) {
306            System.out.println("deleting file because it already exists...");
307            rc.delete();
308
309        }
310        return rc;
311    }
312
313    private static byte[] bytes(String string) {
314        try {
315            return string.getBytes("UTF-8");
316        } catch (UnsupportedEncodingException e) {
317            throw new RuntimeException(e);
318        }
319    }
320
321    protected Location recoveryCheck(DataFile dataFile) throws IOException {
322        Location location = new Location();
323        location.setDataFileId(dataFile.getDataFileId());
324        location.setOffset(0);
325
326        DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
327        try {
328            while( true ) {
329                int size = checkBatchRecord(reader, location.getOffset());
330                if ( size>=0 && location.getOffset()+BATCH_CONTROL_RECORD_SIZE+size <= dataFile.getLength()) {
331                    location.setOffset(location.getOffset()+BATCH_CONTROL_RECORD_SIZE+size);
332                } else {
333
334                    // Perhaps it's just some corruption... scan through the file to find the next valid batch record.  We
335                    // may have subsequent valid batch records.
336                    int nextOffset = findNextBatchRecord(reader, location.getOffset()+1);
337                    if( nextOffset >=0 ) {
338                        Sequence sequence = new Sequence(location.getOffset(), nextOffset - 1);
339                        LOG.info("Corrupt journal records found in '"+dataFile.getFile()+"' between offsets: "+sequence);
340                        dataFile.corruptedBlocks.add(sequence);
341                        location.setOffset(nextOffset);
342                    } else {
343                        break;
344                    }
345                }
346            }
347
348        } catch (IOException e) {
349        } finally {
350            accessorPool.closeDataFileAccessor(reader);
351        }
352
353        int existingLen = dataFile.getLength();
354        dataFile.setLength(location.getOffset());
355        if (existingLen > dataFile.getLength()) {
356            totalLength.addAndGet(dataFile.getLength() - existingLen);
357        }
358
359        if( !dataFile.corruptedBlocks.isEmpty() ) {
360            // Is the end of the data file corrupted?
361            if( dataFile.corruptedBlocks.getTail().getLast()+1 == location.getOffset() ) {
362                dataFile.setLength((int) dataFile.corruptedBlocks.removeLastSequence().getFirst());
363            }
364        }
365
366        return location;
367    }
368
    /**
     * Scans forward from {@code offset} for the next occurrence of
     * BATCH_CONTROL_RECORD_HEADER, reading 4 KiB chunks. Consecutive chunks
     * overlap by the header length so a header spanning a chunk boundary is
     * still found.
     *
     * @return the absolute offset of the next batch header, or -1 at EOF
     */
    private int findNextBatchRecord(DataFileAccessor reader, int offset) throws IOException {
        ByteSequence header = new ByteSequence(BATCH_CONTROL_RECORD_HEADER);
        byte data[] = new byte[1024*4];
        ByteSequence bs = new ByteSequence(data, 0, reader.read(offset, data));

        int pos = 0;
        while( true ) {
            pos = bs.indexOf(header, pos);
            if( pos >= 0 ) {
                return offset+pos;
            } else {
                // need to load the next data chunk in..
                if( bs.length != data.length ) {
                    // If we had a short read then we were at EOF
                    return -1;
                }
                // step back by the header length so a split header is re-seen in the next chunk
                offset += bs.length-BATCH_CONTROL_RECORD_HEADER.length;
                bs = new ByteSequence(data, 0, reader.read(offset, data));
                pos=0;
            }
        }
    }
391
392
    /**
     * Validates the batch control record at {@code offset}: the header bytes
     * must match BATCH_CONTROL_RECORD_HEADER, the batch size must be sane and,
     * when checksumming is enabled, the stored Adler32 must match the batch
     * payload.
     *
     * @return the batch payload size on success, -1 if the record is not a
     *         valid batch control record; a stored checksum of 0 means the
     *         record predates checksumming and is accepted unverified
     */
    public int checkBatchRecord(DataFileAccessor reader, int offset) throws IOException {
        byte controlRecord[] = new byte[BATCH_CONTROL_RECORD_SIZE];
        DataByteArrayInputStream controlIs = new DataByteArrayInputStream(controlRecord);

        reader.readFully(offset, controlRecord);

        // Assert that it's  a batch record.
        for( int i=0; i < BATCH_CONTROL_RECORD_HEADER.length; i++ ) {
            if( controlIs.readByte() != BATCH_CONTROL_RECORD_HEADER[i] ) {
                return -1;
            }
        }

        // reject negative or overflow-prone batch sizes
        int size = controlIs.readInt();
        if (size < 0 || size > (Integer.MAX_VALUE - BATCH_CONTROL_RECORD_SIZE)) {
            return -1;
        }

        if( isChecksum() ) {

            long expectedChecksum = controlIs.readLong();
            if( expectedChecksum == 0 ) {
                // Checksuming was not enabled when the record was stored.
                // we can't validate the record :(
                return size;
            }

            byte data[] = new byte[size];
            reader.readFully(offset+BATCH_CONTROL_RECORD_SIZE, data);

            Checksum checksum = new Adler32();
            checksum.update(data, 0, data.length);

            if( expectedChecksum!=checksum.getValue() ) {
                return -1;
            }

        }
        return size;
    }
433
434
    // Accumulates a newly appended record's size into the journal size metric.
    void addToTotalLength(int size) {
        totalLength.addAndGet(size);
    }

    /** @return total bytes currently accounted to the journal's data files */
    public long length() {
        return totalLength.get();
    }

    // Tail data file currently being appended to; creates the first file on demand.
    synchronized DataFile getCurrentWriteFile() throws IOException {
        if (dataFiles.isEmpty()) {
            rotateWriteFile();
        }
        return dataFiles.getTail();
    }
449
450    synchronized DataFile rotateWriteFile() {
451        int nextNum = !dataFiles.isEmpty() ? dataFiles.getTail().getDataFileId().intValue() + 1 : 1;
452        File file = getFile(nextNum);
453        DataFile nextWriteFile = new DataFile(file, nextNum);
454        fileMap.put(nextWriteFile.getDataFileId(), nextWriteFile);
455        fileByFileMap.put(file, nextWriteFile);
456        dataFiles.addLast(nextWriteFile);
457        return nextWriteFile;
458    }
459
460    public File getFile(int nextNum) {
461        String fileName = filePrefix + nextNum + fileSuffix;
462        File file = new File(directory, fileName);
463        return file;
464    }
465
    /**
     * Looks up the DataFile owning a location by its data file id.
     *
     * @throws IOException if no data file is registered under the location's id
     */
    synchronized DataFile getDataFile(Location item) throws IOException {
        Integer key = Integer.valueOf(item.getDataFileId());
        DataFile dataFile = fileMap.get(key);
        if (dataFile == null) {
            LOG.error("Looking for key " + key + " but not found in fileMap: " + fileMap);
            throw new IOException("Could not locate data file " + getFile(item.getDataFileId()));
        }
        return dataFile;
    }
475
476    synchronized File getFile(Location item) throws IOException {
477        Integer key = Integer.valueOf(item.getDataFileId());
478        DataFile dataFile = fileMap.get(key);
479        if (dataFile == null) {
480            LOG.error("Looking for key " + key + " but not found in fileMap: " + fileMap);
481            throw new IOException("Could not locate data file " + getFile(item.getDataFileId()));
482        }
483        return dataFile.getFile();
484    }
485
    // Next file in the ordered linked list, or null at the tail.
    private DataFile getNextDataFile(DataFile dataFile) {
        return dataFile.getNext();
    }

    /**
     * Stops the cleanup timer, closes the accessor pool and appender and clears
     * all in-memory file state. The appender is closed outside the monitor
     * because it can call back into the journal (AMQ-5620).
     */
    public void close() throws IOException {
        synchronized (this) {
            if (!started) {
                return;
            }
            if (this.timer != null) {
                this.timer.cancel();
            }
            accessorPool.close();
        }
        // the appender can be calling back to the journal blocking a close AMQ-5620
        appender.close();
        synchronized (this) {
            fileMap.clear();
            fileByFileMap.clear();
            dataFiles.clear();
            lastAppendLocation.set(null);
            started = false;
        }
    }

    // Periodic task body: release pooled accessors that have gone unused.
    public synchronized void cleanup() {
        if (accessorPool != null) {
            accessorPool.disposeUnused();
        }
    }
516
517    public synchronized boolean delete() throws IOException {
518
519        // Close all open file handles...
520        appender.close();
521        accessorPool.close();
522
523        boolean result = true;
524        for (Iterator<DataFile> i = fileMap.values().iterator(); i.hasNext();) {
525            DataFile dataFile = i.next();
526            result &= dataFile.delete();
527        }
528        totalLength.set(0);
529        fileMap.clear();
530        fileByFileMap.clear();
531        lastAppendLocation.set(null);
532        dataFiles = new LinkedNodeList<DataFile>();
533
534        // reopen open file handles...
535        accessorPool = new DataFileAccessorPool(this);
536        appender = new DataFileAppender(this);
537        return result;
538    }
539
    /**
     * Removes (archives or deletes) the data files with the given ids, skipping
     * the file currently being appended to and any later files.
     */
    public synchronized void removeDataFiles(Set<Integer> files) throws IOException {
        for (Integer key : files) {
            // Can't remove the data file (or subsequent files) that is currently being written to.
            if( key >= lastAppendLocation.get().getDataFileId() ) {
                continue;
            }
            DataFile dataFile = fileMap.get(key);
            if( dataFile!=null ) {
                forceRemoveDataFile(dataFile);
            }
        }
    }
552
553    private synchronized void forceRemoveDataFile(DataFile dataFile) throws IOException {
554        accessorPool.disposeDataFileAccessors(dataFile);
555        fileByFileMap.remove(dataFile.getFile());
556        fileMap.remove(dataFile.getDataFileId());
557        totalLength.addAndGet(-dataFile.getLength());
558        dataFile.unlink();
559        if (archiveDataLogs) {
560            File directoryArchive = getDirectoryArchive();
561            if (directoryArchive.exists()) {
562                LOG.debug("Archive directory exists: {}", directoryArchive);
563            } else {
564                if (directoryArchive.isAbsolute())
565                if (LOG.isDebugEnabled()) {
566                    LOG.debug("Archive directory [{}] does not exist - creating it now",
567                            directoryArchive.getAbsolutePath());
568                }
569                IOHelper.mkdirs(directoryArchive);
570            }
571            LOG.debug("Moving data file {} to {} ", dataFile, directoryArchive.getCanonicalPath());
572            dataFile.move(directoryArchive);
573            LOG.debug("Successfully moved data file");
574        } else {
575            LOG.debug("Deleting data file: {}", dataFile);
576            if ( dataFile.delete() ) {
577                LOG.debug("Discarded data file: {}", dataFile);
578            } else {
579                LOG.warn("Failed to discard data file : {}", dataFile.getFile());
580            }
581        }
582        if (dataFileRemovedListener != null) {
583            dataFileRemovedListener.fileRemoved(dataFile);
584        }
585    }
586
587    /**
588     * @return the maxFileLength
589     */
590    public int getMaxFileLength() {
591        return maxFileLength;
592    }
593
594    /**
595     * @param maxFileLength the maxFileLength to set
596     */
597    public void setMaxFileLength(int maxFileLength) {
598        this.maxFileLength = maxFileLength;
599    }
600
601    @Override
602    public String toString() {
603        return directory.toString();
604    }
605
    /**
     * Iterates to the next user record after {@code location} (or the first
     * record of the journal when location is null), skipping corrupted ranges,
     * EOF markers and non-user record types.
     *
     * @return the next user record location, or null when the journal is exhausted
     */
    public synchronized Location getNextLocation(Location location) throws IOException, IllegalStateException {

        Location cur = null;
        while (true) {
            if (cur == null) {
                if (location == null) {
                    // start from the head of the first data file
                    DataFile head = dataFiles.getHead();
                    if( head == null ) {
                        return null;
                    }
                    cur = new Location();
                    cur.setDataFileId(head.getDataFileId());
                    cur.setOffset(0);
                } else {
                    // Set to the next offset..
                    if (location.getSize() == -1) {
                        // size unknown (e.g. after skipping corruption) - re-read at same offset
                        cur = new Location(location);
                    } else {
                        cur = new Location(location);
                        cur.setOffset(location.getOffset() + location.getSize());
                    }
                }
            } else {
                cur.setOffset(cur.getOffset() + cur.getSize());
            }

            DataFile dataFile = getDataFile(cur);

            // Did it go into the next file??
            if (dataFile.getLength() <= cur.getOffset()) {
                dataFile = getNextDataFile(dataFile);
                if (dataFile == null) {
                    return null;
                } else {
                    cur.setDataFileId(dataFile.getDataFileId().intValue());
                    cur.setOffset(0);
                }
            }

            // Load in location size and type.
            DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
            try {
                reader.readLocationDetails(cur);
            } finally {
                accessorPool.closeDataFileAccessor(reader);
            }

            Sequence corruptedRange = dataFile.corruptedBlocks.get(cur.getOffset());
            if (corruptedRange != null) {
                // skip corruption
                cur.setSize((int) corruptedRange.range());
            } else if (cur.getType() == 0) {
                // eof - jump to next datafile
                cur.setOffset(maxFileLength);
            } else if (cur.getType() == USER_RECORD_TYPE) {
                // Only return user records.
                return cur;
            }
        }
    }
666
667    public synchronized ByteSequence read(Location location) throws IOException, IllegalStateException {
668        DataFile dataFile = getDataFile(location);
669        DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
670        ByteSequence rc = null;
671        try {
672            rc = reader.readRecord(location);
673        } finally {
674            accessorPool.closeDataFileAccessor(reader);
675        }
676        return rc;
677    }
678
679    public Location write(ByteSequence data, boolean sync) throws IOException, IllegalStateException {
680        Location loc = appender.storeItem(data, Location.USER_TYPE, sync);
681        return loc;
682    }
683
684    public Location write(ByteSequence data, Runnable onComplete) throws IOException, IllegalStateException {
685        Location loc = appender.storeItem(data, Location.USER_TYPE, onComplete);
686        return loc;
687    }
688
689    public void update(Location location, ByteSequence data, boolean sync) throws IOException {
690        DataFile dataFile = getDataFile(location);
691        DataFileAccessor updater = accessorPool.openDataFileAccessor(dataFile);
692        try {
693            updater.updateRecord(location, data, sync);
694        } finally {
695            accessorPool.closeDataFileAccessor(updater);
696        }
697    }
698
    public PreallocationStrategy getPreallocationStrategy() {
        return preallocationStrategy;
    }

    public void setPreallocationStrategy(PreallocationStrategy preallocationStrategy) {
        this.preallocationStrategy = preallocationStrategy;
    }

    public PreallocationScope getPreallocationScope() {
        return preallocationScope;
    }

    public void setPreallocationScope(PreallocationScope preallocationScope) {
        this.preallocationScope = preallocationScope;
    }

    public File getDirectory() {
        return directory;
    }

    public void setDirectory(File directory) {
        this.directory = directory;
    }

    public String getFilePrefix() {
        return filePrefix;
    }

    public void setFilePrefix(String filePrefix) {
        this.filePrefix = filePrefix;
    }

    /** @return live view of writes queued on the appender but not yet forced to disk */
    public Map<WriteKey, WriteCommand> getInflightWrites() {
        return inflightWrites;
    }

    public Location getLastAppendLocation() {
        return lastAppendLocation.get();
    }

    public void setLastAppendLocation(Location lastSyncedLocation) {
        this.lastAppendLocation.set(lastSyncedLocation);
    }

    /**
     * @return the archive directory; defaults to DEFAULT_ARCHIVE_DIRECTORY under
     *         the journal directory unless overridden via setDirectoryArchive()
     */
    public File getDirectoryArchive() {
        if (!directoryArchiveOverridden && (directoryArchive == null)) {
            // create the directoryArchive relative to the journal location
            directoryArchive = new File(directory.getAbsolutePath() +
                    File.separator + DEFAULT_ARCHIVE_DIRECTORY);
        }
        return directoryArchive;
    }

    public void setDirectoryArchive(File directoryArchive) {
        directoryArchiveOverridden = true;
        this.directoryArchive = directoryArchive;
    }

    public boolean isArchiveDataLogs() {
        return archiveDataLogs;
    }

    public void setArchiveDataLogs(boolean archiveDataLogs) {
        this.archiveDataLogs = archiveDataLogs;
    }
764
    /** @return id of the data file currently written to, or null when the journal has no files */
    synchronized public Integer getCurrentDataFileId() {
        if (dataFiles.isEmpty())
            return null;
        return dataFiles.getTail().getDataFileId();
    }

    /**
     * Get a set of files - only valid after start()
     *
     * @return files currently being used
     */
    public Set<File> getFiles() {
        return fileByFileMap.keySet();
    }

    /** @return snapshot of the id-to-DataFile map, ordered by file id */
    public synchronized Map<Integer, DataFile> getFileMap() {
        return new TreeMap<Integer, DataFile>(fileMap);
    }

    public long getDiskSize() {
        return totalLength.get();
    }

    public void setReplicationTarget(ReplicationTarget replicationTarget) {
        this.replicationTarget = replicationTarget;
    }
    public ReplicationTarget getReplicationTarget() {
        return replicationTarget;
    }

    public String getFileSuffix() {
        return fileSuffix;
    }

    public void setFileSuffix(String fileSuffix) {
        this.fileSuffix = fileSuffix;
    }

    public boolean isChecksum() {
        return checksum;
    }

    public void setChecksum(boolean checksumWrites) {
        this.checksum = checksumWrites;
    }

    public boolean isCheckForCorruptionOnStartup() {
        return checkForCorruptionOnStartup;
    }

    public void setCheckForCorruptionOnStartup(boolean checkForCorruptionOnStartup) {
        this.checkForCorruptionOnStartup = checkForCorruptionOnStartup;
    }

    public void setWriteBatchSize(int writeBatchSize) {
        this.writeBatchSize = writeBatchSize;
    }

    public int getWriteBatchSize() {
        return writeBatchSize;
    }

    /** Shares an external accumulator for the size metric (e.g. a store-wide total). */
    public void setSizeAccumulator(AtomicLong storeSizeAccumulator) {
       this.totalLength = storeSizeAccumulator;
    }

    public void setEnableAsyncDiskSync(boolean val) {
        this.enableAsyncDiskSync = val;
    }

    public boolean isEnableAsyncDiskSync() {
        return enableAsyncDiskSync;
    }

    public void setDataFileRemovedListener(DataFileRemovedListener dataFileRemovedListener) {
        this.dataFileRemovedListener = dataFileRemovedListener;
    }
842
    /**
     * A pending journal write: the assigned location, the payload and either a
     * sync flag or an async completion callback (never both).
     */
    public static class WriteCommand extends LinkedNode<WriteCommand> {
        public final Location location;
        public final ByteSequence data;
        final boolean sync;
        public final Runnable onComplete;

        // Synchronous variant: no completion callback.
        public WriteCommand(Location location, ByteSequence data, boolean sync) {
            this.location = location;
            this.data = data;
            this.sync = sync;
            this.onComplete = null;
        }

        // Asynchronous variant: sync is forced to false and a callback is carried instead.
        public WriteCommand(Location location, ByteSequence data, Runnable onComplete) {
            this.location = location;
            this.data = data;
            this.onComplete = onComplete;
            this.sync = false;
        }
    }
863
864    public static class WriteKey {
865        private final int file;
866        private final long offset;
867        private final int hash;
868
869        public WriteKey(Location item) {
870            file = item.getDataFileId();
871            offset = item.getOffset();
872            // TODO: see if we can build a better hash
873            hash = (int)(file ^ offset);
874        }
875
876        public int hashCode() {
877            return hash;
878        }
879
880        public boolean equals(Object obj) {
881            if (obj instanceof WriteKey) {
882                WriteKey di = (WriteKey)obj;
883                return di.file == file && di.offset == offset;
884            }
885            return false;
886        }
887    }
888}