From 724e7e84ab14c39ba2e698eac6d4136349c54571 Mon Sep 17 00:00:00 2001 From: nathan Date: Tue, 16 Sep 2014 11:20:36 +0200 Subject: [PATCH] Added key/value pair storage with weakreference cache and command queueing --- Dorkbox-Util/src/dorkbox/util/DelayTimer.java | 19 +- Dorkbox-Util/src/dorkbox/util/Storage.java | 353 --------- Dorkbox-Util/src/dorkbox/util/Sys.java | 14 +- .../dorkbox/util/bytes/ByteArrayWrapper.java | 3 + .../src/dorkbox/util/storage/Metadata.java | 265 +++++++ .../src/dorkbox/util/storage/Storage.java | 716 ++++++++++++++++++ .../src/dorkbox/util/storage/StorageBase.java | 566 ++++++++++++++ .../test/dorkbox/util/StorageTest.java | 446 ++++++++++- 8 files changed, 1984 insertions(+), 398 deletions(-) delete mode 100644 Dorkbox-Util/src/dorkbox/util/Storage.java create mode 100644 Dorkbox-Util/src/dorkbox/util/storage/Metadata.java create mode 100644 Dorkbox-Util/src/dorkbox/util/storage/Storage.java create mode 100644 Dorkbox-Util/src/dorkbox/util/storage/StorageBase.java diff --git a/Dorkbox-Util/src/dorkbox/util/DelayTimer.java b/Dorkbox-Util/src/dorkbox/util/DelayTimer.java index 0179261..fb18015 100644 --- a/Dorkbox-Util/src/dorkbox/util/DelayTimer.java +++ b/Dorkbox-Util/src/dorkbox/util/DelayTimer.java @@ -13,15 +13,23 @@ public class DelayTimer { private final String name; private final Callback listener; - private Timer timer; + private volatile Timer timer; + private final boolean isDaemon; public DelayTimer(Callback listener) { - this(null, listener); + this(null, true, listener); } - public DelayTimer(String name, Callback listener) { + /** + * Sometimes you want to make sure that this timer will complete, even if the calling thread has terminated. + * @param name the name of the thread (if you want to specify one) + * @param isDaemon true if you want this timer to be run on a daemon thread + * @param listener the callback listener to execute + */ + public DelayTimer(String name, boolean isDaemon, Callback listener) { this.name = name; this.listener = listener; + this.isDaemon = isDaemon; } /** @@ -50,12 +58,11 @@ public class DelayTimer { if (delay > 0) { if (this.name != null) { - this.timer = new Timer(this.name, true); + this.timer = new Timer(this.name, this.isDaemon); } else { - this.timer = new Timer(true); + this.timer = new Timer(this.isDaemon); } - TimerTask t = new TimerTask() { @Override public void run() { diff --git a/Dorkbox-Util/src/dorkbox/util/Storage.java b/Dorkbox-Util/src/dorkbox/util/Storage.java deleted file mode 100644 index 2085826..0000000 --- a/Dorkbox-Util/src/dorkbox/util/Storage.java +++ /dev/null @@ -1,353 +0,0 @@ -package dorkbox.util; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStream; -import java.io.RandomAccessFile; -import java.lang.ref.WeakReference; -import java.lang.reflect.Field; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.util.zip.DeflaterOutputStream; -import java.util.zip.InflaterInputStream; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.esotericsoftware.kryo.Kryo; -import com.esotericsoftware.kryo.io.Input; -import com.esotericsoftware.kryo.io.Output; - -/** - * Nothing spectacular about this storage -- it allows for persistent storage of objects to disk. 
- */ -public class Storage { - private static final Logger logger = LoggerFactory.getLogger(Storage.class); - - private static Map storages = new HashMap(1); - - private final File file; - - private long milliSeconds = 3000L; - private DelayTimer timer; - private WeakReference objectReference; - - private Kryo kryo; - - @SuppressWarnings({"rawtypes","unchecked"}) - public static Storage load(File file, Object loadIntoObject) { - if (file == null) { - throw new IllegalArgumentException("file cannot be null!"); - } - - if (loadIntoObject == null) { - throw new IllegalArgumentException("loadIntoObject cannot be null!"); - } - - // if we load from a NEW storage at the same location as an ALREADY EXISTING storage, - // without saving the existing storage first --- whoops! - synchronized (storages) { - Storage storage = storages.get(file); - if (storage != null) { - boolean waiting = storage.timer.isWaiting(); - if (waiting) { - storage.saveNow(); - } - - // why load it from disk again? just copy out the values! - synchronized (storage) { - // have to load from disk! - Object source = storage.load(file, loadIntoObject.getClass()); - - Object orig = storage.objectReference.get(); - if (orig != null) { - if (orig != loadIntoObject) { - storage.objectReference = new WeakReference(loadIntoObject); - } - - } else { - // whoopie - the old one got GC'd! (for whatever reason, it can be legit) - storage.objectReference = new WeakReference(loadIntoObject); - } - - if (source != null && source != orig) { - copyFields(source, loadIntoObject); - } - } - } else { - // this will load it from disk again, if necessary - storage = new Storage(file, loadIntoObject); - storages.put(file, storage); - - // have to load from disk! - Object source = storage.load(file, loadIntoObject.getClass()); - if (source != null) { - copyFields(source, loadIntoObject); - } - } - return storage; - } - } - - - /** - * Also loads the saved object into the passed-in object. This is sorta slow (nothing is cached for speed!) - * - * If the saved object has more fields than the loadIntoObject, only the fields in loadIntoObject will be - * populated. If the loadIntoObject has more fields than the saved object, then the loadIntoObject will not - * have those fields changed. - */ - @SuppressWarnings({"rawtypes","unchecked"}) - private Storage(File file, Object loadIntoObject) { - this.file = file.getAbsoluteFile(); - File parentFile = this.file.getParentFile(); - if (parentFile != null) { - parentFile.mkdirs(); - } - - this.kryo = new Kryo(); - this.kryo.setRegistrationRequired(false); - - this.objectReference = new WeakReference(loadIntoObject); - this.timer = new DelayTimer("Storage Writer", new DelayTimer.Callback() { - @Override - public void execute() { - save0(); - } - }); - } - - /** - * Loads the saved object into the passed-in object. This is sorta slow (nothing is cached for speed!) - * - * If the saved object has more fields than the loadIntoObject, only the fields in loadIntoObject will be - * populated. If the loadIntoObject has more fields than the saved object, then the loadIntoObject will not - * have those fields changed. - */ - public final void load(Object loadIntoObject) { - if (loadIntoObject == null) { - throw new IllegalArgumentException("loadIntoObject cannot be null!"); - } - - - // if we load from a NEW storage at the same location as an ALREADY EXISTING storage, - // without saving the existing storage first --- whoops! 
- synchronized (storages) { - File file2 = this.file; - - Storage storage = storages.get(file2); - Object source = null; - if (storage != null) { - boolean waiting = storage.timer.isWaiting(); - if (waiting) { - storage.saveNow(); - } - - // why load it from disk again? just copy out the values! - source = storage.objectReference.get(); - if (source == null) { - // have to load from disk! - source = load(file2, loadIntoObject.getClass()); - } - } - - if (source != null) { - copyFields(source, loadIntoObject); - } - } - } - - /** - * @param delay milliseconds to wait - */ - public final void setSaveDelay(long milliSeconds) { - this.milliSeconds = milliSeconds; - } - - /** - * Immediately save the storage to disk - */ - public final synchronized void saveNow() { - this.timer.delay(0L); - } - - /** - * Save the storage to disk, once xxxx milli-seconds have passed. - * This is to help prevent thrashing the disk, or wearing it out on multiple, rapid, changes. - */ - public final synchronized void save() { - this.timer.delay(this.milliSeconds); - } - - private synchronized void save0() { - Object object = Storage.this.objectReference.get(); - - if (object == null) { - Storage.logger.error("Object has been erased and is no longer available to save!"); - return; - } - - Class class1 = object.getClass(); - - RandomAccessFile raf = null; - Output output = null; - try { - raf = new RandomAccessFile(this.file, "rw"); - OutputStream outputStream = new DeflaterOutputStream(new FileOutputStream(raf.getFD())); - output = new Output(outputStream, 1024); // write 1024 at a time - - this.kryo.writeObject(output, object); - output.flush(); - - load(this.file, class1); - - } catch (Exception e) { - Storage.logger.error("Error saving the data!", e); - } finally { - if (output != null) { - output.close(); - } - if (raf != null) { - try { - raf.close(); - } catch (IOException e) { - e.printStackTrace(); - } - } - } - } - - @SuppressWarnings("unchecked") - private synchronized T load(File file, Class clazz) { - if (file.length() == 0) { - return null; - } - - RandomAccessFile raf = null; - Input input = null; - try { - raf = new RandomAccessFile(file, "r"); - input = new Input(new InflaterInputStream(new FileInputStream(raf.getFD())), 1024); // read 1024 at a time - - Object readObject = this.kryo.readObject(input, clazz); - return (T) readObject; - } catch (Exception e) { - logger.error("Error reading from '{}'! Perhaps the file is corrupt?", file.getAbsolutePath()); - return null; - } finally { - if (input != null) { - input.close(); - } - if (raf != null) { - try { - raf.close(); - } catch (IOException e) { - e.printStackTrace(); - } - } - } - } - - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + (this.file == null ? 
0 : this.file.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - Storage other = (Storage) obj; - if (this.file == null) { - if (other.file != null) { - return false; - } - } else if (!this.file.equals(other.file)) { - return false; - } - return true; - } - - - @Override - public String toString() { - return "Storage [" + this.file + "]"; - } - - - private static void copyFields(Object source, Object dest) { - Class sourceClass = source.getClass(); - Class destClass = dest.getClass(); - - if (sourceClass != destClass) { - throw new IllegalArgumentException("Source and Dest objects are not of the same class!"); - } - - // have to walk up the object hierarchy. - while (destClass != Object.class) { - Field[] destFields = destClass.getDeclaredFields(); - - for (Field destField : destFields) { - String name = destField.getName(); - try { - Field sourceField = sourceClass.getDeclaredField(name); - destField.setAccessible(true); - sourceField.setAccessible(true); - - Object sourceObj = sourceField.get(source); - - if (sourceObj instanceof Map) { - Object destObj = destField.get(dest); - if (destObj == null) { - destField.set(dest, sourceObj); - } else if (destObj instanceof Map) { - @SuppressWarnings("unchecked") - Map sourceMap = (Map) sourceObj; - @SuppressWarnings("unchecked") - Map destMap = (Map) destObj; - - destMap.clear(); - Iterator entries = sourceMap.entrySet().iterator(); - while (entries.hasNext()) { - Map.Entry entry = (Map.Entry)entries.next(); - Object key = entry.getKey(); - Object value = entry.getValue(); - destMap.put(key, value); - } - - } else { - logger.error("Incompatible field type! 
'{}'", name); - } - } else { - destField.set(dest, sourceObj); - } - } catch (Exception e) { - logger.error("Unable to copy field: {}", name, e); - } - } - - destClass = destClass.getSuperclass(); - sourceClass = sourceClass.getSuperclass(); - } - } - - public static void shutdown() { - synchronized(storages) { - storages.clear(); - } - } -} diff --git a/Dorkbox-Util/src/dorkbox/util/Sys.java b/Dorkbox-Util/src/dorkbox/util/Sys.java index eec30d6..1f05557 100644 --- a/Dorkbox-Util/src/dorkbox/util/Sys.java +++ b/Dorkbox-Util/src/dorkbox/util/Sys.java @@ -520,14 +520,22 @@ public class Sys { } public static void printArray(byte[] bytes, int length, boolean includeByteCount) { + printArray(bytes, length, includeByteCount, 40); + } + + public static void printArray(byte[] bytes, int length, boolean includeByteCount, int lineLength) { if (includeByteCount) { System.err.println("Bytes: " + length); } - int mod = 40; int comma = length-1; - StringBuilder builder = new StringBuilder(length + length/mod + 2); + StringBuilder builder; + if (lineLength > 0) { + builder = new StringBuilder(length + comma + length/lineLength + 2); + } else { + builder = new StringBuilder(length + comma + 2); + } builder.append("{"); for (int i = 0; i < length; i++) { @@ -535,7 +543,7 @@ public class Sys { if (i < comma) { builder.append(","); } - if (i > 0 && i%mod == 0) { + if (i > 0 && lineLength > 0 && i%lineLength == 0) { builder.append(OS.LINE_SEPARATOR); } } diff --git a/Dorkbox-Util/src/dorkbox/util/bytes/ByteArrayWrapper.java b/Dorkbox-Util/src/dorkbox/util/bytes/ByteArrayWrapper.java index 3504eb4..791cd37 100644 --- a/Dorkbox-Util/src/dorkbox/util/bytes/ByteArrayWrapper.java +++ b/Dorkbox-Util/src/dorkbox/util/bytes/ByteArrayWrapper.java @@ -54,11 +54,14 @@ public final class ByteArrayWrapper { if (!(other instanceof ByteArrayWrapper)) { return false; } + + // CANNOT be null, so we don't have to null check! return Arrays.equals(this.data, ((ByteArrayWrapper) other).data); } @Override public int hashCode() { + // CANNOT be null, so we don't have to null check! return Arrays.hashCode(this.data); } } \ No newline at end of file diff --git a/Dorkbox-Util/src/dorkbox/util/storage/Metadata.java b/Dorkbox-Util/src/dorkbox/util/storage/Metadata.java new file mode 100644 index 0000000..e1b439d --- /dev/null +++ b/Dorkbox-Util/src/dorkbox/util/storage/Metadata.java @@ -0,0 +1,265 @@ +package dorkbox.util.storage; + +import java.io.ByteArrayOutputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.io.RandomAccessFile; +import java.lang.ref.WeakReference; +import java.util.zip.Deflater; +import java.util.zip.DeflaterOutputStream; +import java.util.zip.InflaterInputStream; + +import com.esotericsoftware.kryo.Kryo; +import com.esotericsoftware.kryo.io.Input; +import com.esotericsoftware.kryo.io.Output; + +import dorkbox.util.bytes.ByteArrayWrapper; + +public class Metadata { + // The length of a key in the index. + // SHA256 is 32 bytes long. + private static final int KEY_SIZE = 32; + + // Number of bytes in the record header. + private static final int POINTER_INFO_SIZE = 16; + + // The total length of one index entry - the key length plus the record header length. + static final int INDEX_ENTRY_LENGTH = KEY_SIZE + POINTER_INFO_SIZE; + + + /** + * This is the key to the index + */ + final ByteArrayWrapper key; + + /** + * Indicates this header's position in the file index. 
+ */ + volatile int indexPosition; + + /** + * File pointer to the first byte of record data (8 bytes). + */ + volatile long dataPointer; + + /** + * Actual number of bytes of data held in this record (4 bytes). + */ + volatile int dataCount; + + /** + * Number of bytes of data that this record can hold (4 bytes). + */ + volatile int dataCapacity; + + + /** + * The object that has been registered to this key. This is for automatic saving of data (if it's changed) + */ + volatile WeakReference objectReferenceCache; + + + + /** + * Returns a file pointer in the index pointing to the first byte in the KEY located at the given index position. + */ + static final long getMetaDataPointer(int position) { + return StorageBase.FILE_HEADERS_REGION_LENGTH + INDEX_ENTRY_LENGTH * position; + } + + /** + * Returns a file pointer in the index pointing to the first byte in the RECORD pointer located at the given index + * position. + */ + static final long getDataPointer(int position) { + long l = Metadata.getMetaDataPointer(position) + KEY_SIZE; + return l; + } + + + private Metadata(ByteArrayWrapper key) { + this.key = key; + } + + /** we don't know how much data there is until AFTER we write the data */ + Metadata(ByteArrayWrapper key, int recordIndex, long dataPointer) { + this(key, recordIndex, dataPointer, 0); + } + + Metadata(ByteArrayWrapper key, int recordIndex, long dataPointer, int dataCapacity) { + if (key.getBytes().length > KEY_SIZE) { + throw new IllegalArgumentException("Bad record key size: " + dataCapacity); + } + + + this.key = key; + this.indexPosition = recordIndex; + this.dataPointer = dataPointer; + + // we don't always know the size! + this.dataCapacity = dataCapacity; + this.dataCount = dataCapacity; + } + + int getFreeSpace() { + return this.dataCapacity - this.dataCount; + } + + /** + * Reads the ith HEADER (key + metadata) from the index. 
+ */ + static Metadata readHeader(RandomAccessFile file, int position) throws IOException { + byte[] buf = new byte[KEY_SIZE]; + long origHeaderKeyPointer = Metadata.getMetaDataPointer(position); + file.seek(origHeaderKeyPointer); + file.readFully(buf); + + Metadata r = new Metadata(ByteArrayWrapper.wrap(buf)); + r.indexPosition = position; + + long recordHeaderPointer = Metadata.getDataPointer(position); + file.seek(recordHeaderPointer); + r.dataPointer = file.readLong(); + r.dataCapacity = file.readInt(); + r.dataCount = file.readInt(); + + if (r.dataPointer == 0L || r.dataCapacity == 0L || r.dataCount == 0L) { + return null; + } + + return r; + } + + + void write(RandomAccessFile file) throws IOException { + writeMetaDataInfo(file); + writeDataInfo(file); + } + + void writeMetaDataInfo(RandomAccessFile file) throws IOException { + long recordKeyPointer = Metadata.getMetaDataPointer(this.indexPosition); + + file.seek(recordKeyPointer); + file.write(this.key.getBytes()); + } + + void writeDataInfo(RandomAccessFile file) throws IOException { + long recordHeaderPointer = getDataPointer(this.indexPosition); + + file.seek(recordHeaderPointer); + file.writeLong(this.dataPointer); + file.writeInt(this.dataCapacity); + file.writeInt(this.dataCount); + } + + /** + * Move a record to the new INDEX + */ + void move(RandomAccessFile file, int newIndex) throws IOException { + byte[] buf = new byte[KEY_SIZE]; + + long origHeaderKeyPointer = Metadata.getMetaDataPointer(this.indexPosition); + file.seek(origHeaderKeyPointer); + file.readFully(buf); + + long newHeaderKeyPointer = Metadata.getMetaDataPointer(newIndex); + file.seek(newHeaderKeyPointer); + file.write(buf); + +// System.err.println("updating ptr: " + this.indexPosition + " -> " + newIndex + " @ " + newHeaderKeyPointer + "-" + (newHeaderKeyPointer+INDEX_ENTRY_LENGTH)); + this.indexPosition = newIndex; + + writeDataInfo(file); + } + + /** + * Move a record DATA to the new position, and update record header info + */ + void moveData(RandomAccessFile file, long position) throws IOException { + // now we move it to the end of the file. + // we ALSO trim the free space off. + byte[] data = readDataRaw(file); + + this.dataPointer = position; + this.dataCapacity = this.dataCount; + + // update the file size + file.setLength(position + this.dataCount); + +// System.err.print("moving data: " + this.indexPosition + " @ " + this.dataPointer + "-" + (this.dataPointer+data.length) + " -- "); +// Sys.printArray(data, data.length, false, 0); + // save the data + file.seek(position); + file.write(data); + + // update header pointer info + writeDataInfo(file); + } + + + /** + * Reads the record data for the given record header. + */ + byte[] readDataRaw(RandomAccessFile file) throws IOException { + + byte[] buf = new byte[this.dataCount]; +// System.err.print("Reading data: " + this.indexPosition + " @ " + this.dataPointer + "-" + (this.dataPointer+this.dataCount) + " -- "); + + file.seek(this.dataPointer); + file.readFully(buf); +// Sys.printArray(buf, buf.length, false, 0); + + return buf; + } + + /** + * Reads the record data for the given record header. 
+ */ + @SuppressWarnings("unchecked") + T readData(Kryo kryo, InflaterInputStream inputStream) throws IOException { + Input input = new Input(inputStream, 1024); // read 1024 at a time + Object readObject = kryo.readClassAndObject(input); + return (T) readObject; + } + + + ByteArrayOutputStream getDataStream(Kryo kryo, Object data, Deflater deflater) throws IOException { + ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); + OutputStream outputStream = new DeflaterOutputStream(byteArrayOutputStream, deflater); + Output output = new Output(outputStream, 1024); // write 1024 at a time + kryo.writeClassAndObject(output, data); + output.flush(); + + outputStream.flush(); + outputStream.close(); + + return byteArrayOutputStream; + } + + /** + * Writes data to the end of the file (which is where the datapointer is at) + */ + void writeDataToEndOfFile(Kryo kryo, Object data, DeflaterOutputStream outputStream) throws IOException { + Output output = new Output(outputStream, 1024); // write 1024 at a time + kryo.writeClassAndObject(output, data); + output.flush(); + + outputStream.flush(); + outputStream.finish(); // have to make sure that the outputstream finishes compressing data + } + + void writeData(ByteArrayOutputStream byteArrayOutputStream, RandomAccessFile file) throws IOException { + this.dataCount = byteArrayOutputStream.size(); + FileOutputStream out = new FileOutputStream(file.getFD()); + file.seek(this.dataPointer); + byteArrayOutputStream.writeTo(out); + out.flush(); + } + + @Override + public String toString() { + return "RecordHeader [dataPointer=" + this.dataPointer + ", dataCount=" + this.dataCount + ", dataCapacity=" + + this.dataCapacity + ", indexPosition=" + this.indexPosition + "]"; + } +} diff --git a/Dorkbox-Util/src/dorkbox/util/storage/Storage.java b/Dorkbox-Util/src/dorkbox/util/storage/Storage.java new file mode 100644 index 0000000..907e4a1 --- /dev/null +++ b/Dorkbox-Util/src/dorkbox/util/storage/Storage.java @@ -0,0 +1,716 @@ +package dorkbox.util.storage; + +import java.io.File; +import java.io.IOException; +import java.lang.reflect.Field; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.ReentrantLock; + +import org.bouncycastle.crypto.digests.SHA256Digest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import dorkbox.util.DelayTimer; +import dorkbox.util.OS; +import dorkbox.util.bytes.ByteArrayWrapper; + +/** + * Nothing spectacular about this storage -- it allows for persistent storage of objects to disk. + */ +public class Storage { + private static final Logger logger = LoggerFactory.getLogger(Storage.class); + + private static Map storages = new HashMap(1); + + public static Storage open(String file) { + if (file == null || file.isEmpty()) { + throw new IllegalArgumentException("file cannot be null or empty!"); + } + return open(new File(file)); + } + + /** + * Two types of storage. 
+ * Raw) save/load a single object to disk (better for really large files) + * Normal) save/load key/value objects to disk (better for multiple types of data in a single file) + */ + public static Storage open(File file) { + if (file == null) { + throw new IllegalArgumentException("file cannot be null!"); + } + + // if we load from a NEW storage at the same location as an ALREADY EXISTING storage, + // without saving the existing storage first --- whoops! + synchronized (storages) { + Storage storage = storages.get(file); + + if (storage != null) { + boolean waiting = storage.hasWriteWaiting(); + // we want this storage to be in a fresh state + if (waiting) { + storage.saveNow(); + } + storage.increaseReference(); + } else { + try { + StorageBase storageBase = new StorageBase(file); + + storage = new Storage(storageBase); + storages.put(file, storage); + } catch (IOException e) { + logger.error("Unable to open storage", e); + } + } + + return storage; + } + } + + + /** + * Closes the storage. + */ + public static void close(File file) { + synchronized (storages) { + Storage storage = storages.get(file); + if (storage != null) { + boolean isLastOne = storage.decrementReference(); + if (isLastOne) { + storage.close(); + storages.remove(file); + } + } + } + } + + /** + * Closes the storage. + */ + public static void close(Storage _storage) { + synchronized (storages) { + File file = _storage.getFile(); + Storage storage = storages.get(file); + if (storage != null) { + boolean isLastOne = storage.decrementReference(); + if (isLastOne) { + storage.close(); + storages.remove(file); + } + } + } + } + + + private static void copyFields(Object source, Object dest) { + Class sourceClass = source.getClass(); + Class destClass = dest.getClass(); + + if (sourceClass != destClass) { + throw new IllegalArgumentException("Source and Dest objects are not of the same class!"); + } + + // have to walk up the object hierarchy. + while (destClass != Object.class) { + Field[] destFields = destClass.getDeclaredFields(); + + for (Field destField : destFields) { + String name = destField.getName(); + try { + Field sourceField = sourceClass.getDeclaredField(name); + destField.setAccessible(true); + sourceField.setAccessible(true); + + Object sourceObj = sourceField.get(source); + + if (sourceObj instanceof Map) { + Object destObj = destField.get(dest); + if (destObj == null) { + destField.set(dest, sourceObj); + } else if (destObj instanceof Map) { + @SuppressWarnings("unchecked") + Map sourceMap = (Map) sourceObj; + @SuppressWarnings("unchecked") + Map destMap = (Map) destObj; + + destMap.clear(); + Iterator entries = sourceMap.entrySet().iterator(); + while (entries.hasNext()) { + Map.Entry entry = (Map.Entry)entries.next(); + Object key = entry.getKey(); + Object value = entry.getValue(); + destMap.put(key, value); + } + + } else { + logger.error("Incompatible field type! '{}'", name); + } + } else { + destField.set(dest, sourceObj); + } + } catch (Exception e) { + logger.error("Unable to copy field: {}", name, e); + } + } + + destClass = destClass.getSuperclass(); + sourceClass = sourceClass.getSuperclass(); + } + } + + /** + * @return true if all of the fields in the two objects are the same. 
+ * + * NOTE: This is SLIGHTLY different than .equals(), in that there doesn't have to + * be an EXPLICIT .equals() method in the object + */ + private static boolean compareFields(Object source, Object dest) { + Class sourceClass = source.getClass(); + Class destClass = dest.getClass(); + + if (sourceClass != destClass) { + throw new IllegalArgumentException("Source and Dest objects are not of the same class!"); + } + + // have to walk up the object hierarchy. + while (destClass != Object.class) { + Field[] destFields = destClass.getDeclaredFields(); + + for (Field destField : destFields) { + String name = destField.getName(); + try { + Field sourceField = sourceClass.getDeclaredField(name); + destField.setAccessible(true); + sourceField.setAccessible(true); + + Object sourceObj = sourceField.get(source); + Object destObj = destField.get(dest); + + if (sourceObj == null) { + if (destObj == null) { + return true; + } else { + return false; + } + } else { + if (destObj == null) { + return false; + } else { + return destObj.equals(sourceObj); + } + } + } catch (Exception e) { + logger.error("Unable to copy field: {}", name, e); + return false; + } + } + + destClass = destClass.getSuperclass(); + sourceClass = sourceClass.getSuperclass(); + } + + return true; + } + + public static void shutdown() { + synchronized(storages) { + Collection values = storages.values(); + for (Storage storage : values) { + storage.close(); + } + storages.clear(); + } + } + + public static void delete(File file) { + synchronized(storages) { + Storage remove = storages.remove(file); + if (remove != null) { + remove.close(); + } + file.delete(); + } + } + + public static void delete(Storage storage) { + File file = storage.getFile(); + delete(file); + } + + + + + + + + + + private volatile long milliSeconds = 3000L; + private volatile DelayTimer timer; + + private final ByteArrayWrapper defaultKey; + private final StorageBase storage; + + private AtomicInteger references = new AtomicInteger(1); + + private final ReentrantLock actionLock = new ReentrantLock(); + private volatile Map actionMap = new ConcurrentHashMap(); + + private Map cacheMap = new HashMap(); + + private AtomicBoolean isOpen = new AtomicBoolean(false); + + + + /** + * Creates or opens a new database file. + */ + private Storage(StorageBase _storage) { + this.storage = _storage; + this.defaultKey = wrap(""); + + this.timer = new DelayTimer("Storage Writer", false, new DelayTimer.Callback() { + @Override + public void execute() { + Map actions = Storage.this.actionMap; + + ReentrantLock actionLock2 = Storage.this.actionLock; + + try { + actionLock2.lock(); + + // do a fast swap on the actionMap. + Storage.this.actionMap = new ConcurrentHashMap(); + } finally { + actionLock2.unlock(); + } + + // anything in cacheMap must also be resaved, BUT ONLY IF IT's DIFFERENT! + Map cacheMap2 = Storage.this.cacheMap; + StorageBase storage2 = Storage.this.storage; + + synchronized (cacheMap2) { + if (!cacheMap2.isEmpty()) { + for (Entry entry : cacheMap2.entrySet()) { + ByteArrayWrapper key = entry.getKey(); + Object value = entry.getValue(); + + if (value != null && key != null) { + Object originalVersion = storage2.getCached(key); + + if (!value.equals(originalVersion)) { + actions.put(key, value); + } + } + } + } + } + + storage2.doActionThings(actions); + } + }); + + this.isOpen.set(true); + } + + /** + * Returns the number of objects in the database. + *
+ * SLOW because this must save all data to disk first! + */ + public final int size() { + if (!this.isOpen.get()) { + throw new RuntimeException("Unable to act on closed storage"); + } + + // flush actions + // timer action runs on THIS thread, not timer thread + this.timer.delay(0L); + + return this.storage.size(); + } + + /** + * Checks if there is a object corresponding to the given key. + */ + public final boolean contains(String key) { + if (!this.isOpen.get()) { + throw new RuntimeException("Unable to act on closed storage"); + } + + return this.storage.contains(wrap(key)); + } + + /** + * Registers this key/value (object) pair to be automatically saved in a save operation + * + * @param key the key to save/register this object under + */ + public final void register(String key, Object object) { + ByteArrayWrapper wrap = wrap(key); + + synchronized (this.cacheMap) { + this.cacheMap.put(wrap, object); + } + } + + /** + * UN-Registers this key/value (object) pair, so it will no longer be automatically saved in a save operation. + * + * @param key the key to save/register this object under + */ + public final void unregister(String key) { + ByteArrayWrapper wrap = wrap(key); + + synchronized (this.cacheMap) { + this.cacheMap.remove(wrap); + } + } + + /** + * Reads a object using the default (blank) key + */ + public final T get() { + return get0(this.defaultKey); + } + + /** + * Reads a object using the specific key. + */ + public final T get(String key) { + ByteArrayWrapper wrap = wrap(key); + return get0(wrap); + } + + /** + * Copies the saved data (from) into the passed-in data. This just assigns all of the values from one to the other. + *
+ * This will check to see if there is an associated key for that data, if not - it will use the default (blank) + * + * @param data The data that will hold the copy of the data from disk + */ + public void load(Object data) { + Object source = get(); + + if (source != null) { + Storage.copyFields(source, data); + } + } + + /** + * Copies the saved data (from) into the passed-in data. This just assigns all of the values from one to the other. + *
+ * The key/data will be associated for the lifetime of the object. + * + * @param object The object that will hold the copy of the data from disk (but not change the reference) once this is completed + */ + public void load(String key, Object object) { + ByteArrayWrapper wrap = wrap(key); + + Object source = get0(wrap); + if (source != null) { + Storage.copyFields(source, object); + } + } + + /** + * Reads a object from pending or from storage + */ + private final T get0(ByteArrayWrapper key) { + if (!this.isOpen.get()) { + throw new RuntimeException("Unable to act on closed storage"); + } + + // if the object in is pending, we get it from there + try { + this.actionLock.lock(); + + Object object = this.actionMap.get(key); + + if (object != null) { + @SuppressWarnings("unchecked") + T returnObject = (T) object; + return returnObject; + } + } finally { + this.actionLock.unlock(); + } + + // not found, so we have to go find it on disk + return this.storage.get(key); + } + + /** + * Save the storage to disk, once xxxx milli-seconds have passed. + * This is to help prevent thrashing the disk, or wearing it out on multiple, rapid, changes. + *
+ * This will save the ALL of the pending save actions to the file + */ + public final void saveNow() { + if (!this.isOpen.get()) { + throw new RuntimeException("Unable to act on closed storage"); + } + + // timer action runs on THIS thread, not timer thread + this.timer.delay(0L); + } + + /** + * Saves the given data to storage with the associated key. + *
+ * Also will update existing data. If the new contents do not fit in the original space, then the update is handled by + * deleting the old data and adding the new. + */ + public final void save(String key, Object object) { + if (!this.isOpen.get()) { + throw new RuntimeException("Unable to act on closed storage"); + } + + ByteArrayWrapper wrap = wrap(key); + action(wrap, object); + + synchronized (this.cacheMap) { + // if our cache has this key, update it! + if (this.cacheMap.containsKey(wrap)) { + this.cacheMap.put(wrap, object); + } + } + + // timer action runs on TIMER thread, not this thread + this.timer.delay(this.milliSeconds); + } + + /** + * Saves the given object to storage with the associated key. + *
+ * Also will update existing data. If the new contents do not fit in the original space, then the update is handled by + * deleting the old data and adding the new. + */ + public final void saveNow(String key, Object object) { + if (!this.isOpen.get()) { + throw new RuntimeException("Unable to act on closed storage"); + } + + ByteArrayWrapper wrap = wrap(key); + action(wrap, object); + + synchronized (this.cacheMap) { + // if our cache has this key, update it! + if (this.cacheMap.containsKey(wrap)) { + this.cacheMap.put(wrap, object); + } + } + + // timer action runs on THIS thread, not timer thread + this.timer.delay(0L); + } + + /** + * Saves all of the registered objects (and pending operations) to storage + *
+ * Also will update existing data. If the new contents do not fit in the original space, then the update is handled by + * deleting the old data and adding the new. + */ + public final void saveRegistered(String key) { + if (!this.isOpen.get()) { + throw new RuntimeException("Unable to act on closed storage"); + } + + ByteArrayWrapper wrap = wrap(key); + + synchronized (this.cacheMap) { + if (this.cacheMap.containsKey(wrap)) { + // timer action runs on TIMER thread, not this thread + this.timer.delay(this.milliSeconds); + } + } + } + + /** + * Immediately saves all of the registered objects (and pending operations) to storage + *
+ * Also will update existing data. If the new contents do not fit in the original space, then the update is handled by + * deleting the old data and adding the new. + */ + public final void saveRegisteredNow(String key) { + if (!this.isOpen.get()) { + throw new RuntimeException("Unable to act on closed storage"); + } + + ByteArrayWrapper wrap = wrap(key); + + synchronized (this.cacheMap) { + if (this.cacheMap.containsKey(wrap)) { + // timer action runs on THIS thread, not timer thread + this.timer.delay(0L); + } + } + } + + /** + * Adds the given object to the storage using a default (blank) key + *
+ * Also will update existing data. If the new contents do not fit in the original space, then the update is handled by + * deleting the old data and adding the new. + */ + public final void save(Object object) { + if (!this.isOpen.get()) { + throw new RuntimeException("Unable to act on closed storage"); + } + + action(this.defaultKey, object); + + // timer action runs on TIMER thread, not this thread + this.timer.delay(this.milliSeconds); + } + + /** + * Adds the given object to the storage using a default (blank) key + *
+ * Also will update existing data. If the new contents do not fit in the original space, then the update is handled by + * deleting the old data and adding the new. + */ + public final void saveNow(Object object) { + if (!this.isOpen.get()) { + throw new RuntimeException("Unable to act on closed storage"); + } + + action(this.defaultKey, object); + + // timer action runs on THIS thread, not timer thread + this.timer.delay(0L); + } + + /** + * Deletes an object from storage. To ALSO remove from the cache, use unRegister(key) + * + * @return true if the delete was successful. False if there were problems deleting the data. + */ + public final boolean delete(String key) { + if (!this.isOpen.get()) { + throw new RuntimeException("Unable to act on closed storage"); + } + + ByteArrayWrapper wrap = wrap(key); + + // timer action runs on THIS thread, not timer thread + this.timer.delay(0L); + + return this.storage.delete(wrap); + } + + /** + * Closes the database and file. + */ + private final void close() { + this.isOpen.set(false); + + // timer action runs on THIS thread, not timer thread + this.timer.delay(0L); + + this.storage.close(); + this.cacheMap.clear(); + } + + /** + * @return the file that backs this storage + */ + public final File getFile() { + return this.storage.getFile(); + } + + /** + * Gets the backing file size. + * + * @return -1 if there was an error + */ + public final long getFileSize() { + // timer action runs on THIS thread, not timer thread + this.timer.delay(0L); + + return this.storage.getFileSize(); + } + + + /** + * @return true if there are objects queued to be written? + */ + public final boolean hasWriteWaiting() { + if (!this.isOpen.get()) { + throw new RuntimeException("Unable to act on closed storage"); + } + + return this.timer.isWaiting(); + } + + /** + * @param delay milliseconds to wait + */ + public final void setSaveDelay(long milliSeconds) { + if (!this.isOpen.get()) { + throw new RuntimeException("Unable to act on closed storage"); + } + + this.milliSeconds = milliSeconds; + } + + /** + * @return the delay in milliseconds this will wait after the last action to flush the data to the disk + */ + public final long getSaveDelay() { + return this.milliSeconds; + } + + /** + * @return the version of data stored in the database + */ + public final int getVersion() { + return this.storage.getVersion(); + } + + /** + * Sets the version of data stored in the database + */ + public final void setVersion(int version) { + this.storage.setVersion(version); + } + + + private final ByteArrayWrapper wrap(String key) { + byte[] bytes = key.getBytes(OS.UTF_8); + + SHA256Digest digest = new SHA256Digest(); + digest.update(bytes, 0, bytes.length); + byte[] hashBytes = new byte[digest.getDigestSize()]; + digest.doFinal(hashBytes, 0); + ByteArrayWrapper wrap = ByteArrayWrapper.wrap(hashBytes); + + return wrap; + } + + private void action(ByteArrayWrapper key, Object object) { + try { + this.actionLock.lock(); + + // push action to map + this.actionMap.put(key, object); + } finally { + this.actionLock.unlock(); + } + } + + private final void increaseReference() { + this.references.incrementAndGet(); + } + + /** return true when this is the last reference */ + private final boolean decrementReference() { + return this.references.decrementAndGet() == 0; + } +} diff --git a/Dorkbox-Util/src/dorkbox/util/storage/StorageBase.java b/Dorkbox-Util/src/dorkbox/util/storage/StorageBase.java new file mode 100644 index 0000000..48da53b --- /dev/null +++ 
b/Dorkbox-Util/src/dorkbox/util/storage/StorageBase.java @@ -0,0 +1,566 @@ +package dorkbox.util.storage; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.lang.ref.WeakReference; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.locks.ReentrantLock; +import java.util.zip.Deflater; +import java.util.zip.DeflaterOutputStream; +import java.util.zip.Inflater; +import java.util.zip.InflaterInputStream; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.esotericsoftware.kryo.Kryo; + +import dorkbox.util.bytes.ByteArrayWrapper; + +public class StorageBase { + private final Logger logger = LoggerFactory.getLogger(getClass()); + + + // File pointer to the data start pointer header. + private static final long VERSION_HEADER_LOCATION = 0; + + // File pointer to the num records header. + private static final long NUM_RECORDS_HEADER_LOCATION = 4; + + // File pointer to the data start pointer header. + private static final long DATA_START_HEADER_LOCATION = 8; + + // Total length in bytes of the global database headers. + static final int FILE_HEADERS_REGION_LENGTH = 16; + + + // The in-memory index (for efficiency, all of the record info is cached in memory). + private final Map memoryIndex; + + // determines how much the index will grow by + private Float weight; + + // The keys are weak! When they go, the map entry is removed! + private final ReentrantLock referenceLock = new ReentrantLock(); + private volatile Map referencePendingWrite = new HashMap(); + + + private final File baseFile; + private final RandomAccessFile file; + + + /** + * Version number of database (4 bytes). + */ + private int databaseVersion = 0; + + /** + * Actual number of bytes of data held in this record (4 bytes). + */ + private int numberOfRecords; + + /** + * File pointer to the first byte of the record data (8 bytes). + */ + private long dataPosition; + + + // save references to these, so they don't have to be created/destroyed any time there is I/O + private final Kryo kryo; + + private Deflater deflater; + private DeflaterOutputStream outputStream; + + private Inflater inflater; + private InflaterInputStream inputStream; + + /** + * Creates or opens a new database file. + */ + StorageBase(String filePath) throws IOException { + this(new File(filePath)); + } + + /** + * Creates or opens a new database file. 
+ */ + StorageBase(File filePath) throws IOException { + this.baseFile = filePath; + + File parentFile = this.baseFile.getParentFile(); + if (parentFile != null) { + parentFile.mkdirs(); + } + + this.file = new RandomAccessFile(this.baseFile, "rw"); + + if (this.file.length() > FILE_HEADERS_REGION_LENGTH) { + this.file.seek(VERSION_HEADER_LOCATION); + this.databaseVersion = this.file.readInt(); + this.numberOfRecords = this.file.readInt(); + this.dataPosition = this.file.readLong(); + } else { + setVersionNumber(this.file, 0); + + // always start off with 4 records + setRecordCount(this.file, 4); + + long indexPointer = Metadata.getMetaDataPointer(4); + setDataPosition(this.file, indexPointer); + // have to make sure we can read header info (even if it's blank) + this.file.setLength(indexPointer); + } + + this.kryo = new Kryo(); + this.kryo.setRegistrationRequired(false); + + this.deflater = new Deflater(7, true); + this.outputStream = new DeflaterOutputStream(new FileOutputStream(this.file.getFD()), this.deflater); + + this.inflater = new Inflater(true); + this.inputStream = new InflaterInputStream(new FileInputStream(this.file.getFD()), this.inflater); + + this.weight = .5F; + this.memoryIndex = new ConcurrentHashMap(this.numberOfRecords); + + Metadata meta; + for (int index = 0; index < this.numberOfRecords; index++) { + meta = Metadata.readHeader(this.file, index); + if (meta == null) { + // because we guarantee that empty metadata are AWLAYS at the end of the section, if we get a null one, break! + break; + } + this.memoryIndex.put(meta.key, meta); + } + } + + /** + * Returns the current number of records in the database. + */ + final int size() { + // wrapper flushes first (protected by lock) + // not protected by lock + return this.memoryIndex.size(); + } + + /** + * Checks if there is a record belonging to the given key. + */ + final boolean contains(ByteArrayWrapper key) { + // protected by lock + + // check to see if it's in the pending ops + if (this.referencePendingWrite.containsKey(key)) { + return true; + } + + return this.memoryIndex.containsKey(key); + } + + /** + * @return an object for a specified key ONLY FROM THE REFERENCE CACHE + */ + final T getCached(ByteArrayWrapper key) { + // protected by lock + + Metadata meta = this.memoryIndex.get(key); + if (meta == null) { + return null; + } + + // now stuff it into our reference cache so subsequent lookups are fast! + try { + this.referenceLock.lock(); + + // if we have registered it, get it! + WeakReference ref = meta.objectReferenceCache; + + if (ref != null) { + @SuppressWarnings("unchecked") + T referenceObject = (T) ref.get(); + return referenceObject; + } + } finally { + this.referenceLock.unlock(); + } + + return null; + } + + /** + * @return an object for a specified key form referenceCache FIRST, then from DISK + */ + final T get(ByteArrayWrapper key) { + // NOT protected by lock + + Metadata meta = this.memoryIndex.get(key); + if (meta == null) { + return null; + } + + // now get it from our reference cache so subsequent lookups are fast! + try { + this.referenceLock.lock(); + + // if we have registered it, get it! 
+ WeakReference ref = meta.objectReferenceCache; + + if (ref != null) { + @SuppressWarnings("unchecked") + T referenceObject = (T) ref.get(); + return referenceObject; + } + } finally { + this.referenceLock.unlock(); + } + + + try { + // else, we have to load it from disk + this.inflater.reset(); + this.file.seek(meta.dataPointer); + + @SuppressWarnings("unchecked") + T readRecordData = (T) meta.readData(this.kryo, this.inputStream); + + if (readRecordData != null) { + // now stuff it into our reference cache for future lookups! + try { + this.referenceLock.lock(); + + meta.objectReferenceCache = new WeakReference(readRecordData); + } finally { + this.referenceLock.unlock(); + } + } + + return readRecordData; + } catch (IOException e) { + this.logger.error("Error while geting data from disk", e); + return null; + } + } + + /** + * Deletes a record + * + * @return true if the delete was successful. False if there were problems deleting the data. + */ + final boolean delete(ByteArrayWrapper key) { + // pending ops flushed (protected by lock) + // not protected by lock + Metadata delRec = this.memoryIndex.get(key); + + try { + deleteRecordData(delRec); + deleteRecordIndex(key, delRec); + return true; + } catch (IOException e) { + this.logger.error("Error while deleting data from disk", e); + return false; + } + } + + /** + * Closes the database and file. + */ + final void close() { + // pending ops flushed (protected by lock) + // not protected by lock + try { + this.file.getFD().sync(); + this.file.close(); + this.memoryIndex.clear(); + + this.inputStream.close(); + } catch (IOException e) { + this.logger.error("Error while closing the file", e); + } + } + + /** + * Gets the backing file size. + * + * @return -1 if there was an error + */ + long getFileSize() { + // protected by actionLock + try { + return this.file.length(); + } catch (IOException e) { + this.logger.error("Error getting file size for {}", this.baseFile.getAbsolutePath(), e); + return -1L; + } + } + + /** + * @return the file that backs this storage + */ + final File getFile() { + return this.baseFile; + } + + + /** + * Saves the given data to storage. + *
+ * Also will update existing data. If the new contents do not fit in the original space, then the update is handled by + * deleting the old data and adding the new. + *
+ * Will also save the object in a cache. + * + * @return the metadata for the saved object + */ + private final void save0(ByteArrayWrapper key, Object object, DeflaterOutputStream fileOutputStream, Deflater deflater) { + deflater.reset(); + + Metadata metaData = this.memoryIndex.get(key); + + if (metaData != null) { + // now we have to UPDATE instead of add! + try { + ByteArrayOutputStream dataStream = metaData.getDataStream(this.kryo, object, deflater); + + int size = dataStream.size(); + if (size > metaData.dataCapacity) { + deleteRecordData(metaData); + // stuff this record to the end of the file, since it won't fit in it's current location + metaData.dataPointer = this.file.length(); + // have to make sure that the CAPACITY of the new one is the SIZE of the new data! + // and since it is going to the END of the file, we do that. + metaData.dataCapacity = size; + metaData.dataCount = 0; + } + + metaData.writeData(dataStream, this.file); + metaData.writeDataInfo(this.file); + } catch (IOException e) { + this.logger.error("Error while saving data to disk", e); + } + } else { + int currentRecordCount = this.memoryIndex.size(); + + try { + // try to move the read head in order + setRecordCount(this.file, currentRecordCount+1); + + // This will make sure that there is room to write a new record. This is zero indexed. + // this will skip around if moves occur + ensureIndexCapacity(this.file); + + // append record to end of file + long length = this.file.length(); + metaData = new Metadata(key, currentRecordCount, length); + metaData.writeMetaDataInfo(this.file); + + // update index + this.memoryIndex.put(key, metaData); + + // save out the data. Because we KNOW that we are writing this to the end of the file, + // there are some tricks we can use. + this.file.seek(length); // this is the end of the file, we know this ahead-of-time + + metaData.writeDataToEndOfFile(this.kryo, object, fileOutputStream); + + metaData.dataCount = deflater.getTotalOut(); + metaData.dataCapacity = metaData.dataCount; + // have to save it. + metaData.writeDataInfo(this.file); + } catch (IOException e) { + this.logger.error("Error while writing data to disk", e); + return; + } + } + + // put the object in the reference cache so we can read/get it later on + metaData.objectReferenceCache = new WeakReference(object); + } + + void doActionThings(Map actions) { + DeflaterOutputStream outputStream2 = this.outputStream; + Deflater deflater2 = this.deflater; + + // actions is thrown away after this invocation. GC can pick it up. + // we are only interested in the LAST action that happened for some data. + // items to be "autosaved" are automatically injected into "actions". 
+ for (Entry entry : actions.entrySet()) { + Object object = entry.getValue(); + ByteArrayWrapper key = entry.getKey(); + + // our action list is for explicitly saving objects (but not necessarily "registering" them to be auto-saved + save0(key, object, outputStream2, deflater2); + } + } + + + ///////////////////// + ///////////////////// + // private/index only methods + ///////////////////// + ///////////////////// + + private void deleteRecordData(Metadata deletedRecord) throws IOException { + if (this.file.length() == deletedRecord.dataPointer + deletedRecord.dataCapacity) { + // shrink file since this is the last record in the file + this.file.setLength(deletedRecord.dataPointer); + } else { + Metadata previous = index_getMetaDataFromData(deletedRecord.dataPointer - 1); + if (previous != null) { + // append space of deleted record onto previous record + previous.dataCapacity += deletedRecord.dataCapacity; + previous.writeDataInfo(this.file); + } else { + // the record to delete is the FIRST (of many) in the file. + // the FASTEST way to delete is to grow the records! + // Another option is to move the #2 data to the first data, but then there is the same gap after #2. + Metadata secondRecord = index_getMetaDataFromData(deletedRecord.dataPointer + deletedRecord.dataCapacity + 1); + setDataPosition(this.file, secondRecord.dataPointer); + } + } + + } + + private void deleteRecordIndex(ByteArrayWrapper key, Metadata deleteRecord) throws IOException { + int currentNumRecords = this.memoryIndex.size(); + + if (deleteRecord.indexPosition != currentNumRecords - 1) { + Metadata last = Metadata.readHeader(this.file, currentNumRecords - 1); + last.move(this.file, deleteRecord.indexPosition); + } + + this.memoryIndex.remove(key); + + setRecordCount(this.file, currentNumRecords - 1); + } + + + /** + * Writes the number of records header to the file. + */ + private final void setVersionNumber(RandomAccessFile file, int versionNumber) throws IOException { + this.databaseVersion = versionNumber; + + file.seek(VERSION_HEADER_LOCATION); + file.writeInt(versionNumber); + } + + /** + * Writes the number of records header to the file. + */ + private final void setRecordCount(RandomAccessFile file, int numberOfRecords) throws IOException { + this.numberOfRecords = numberOfRecords; + + file.seek(NUM_RECORDS_HEADER_LOCATION); + file.writeInt(numberOfRecords); + } + + /** + * Writes the data start position to the file. + */ + private final void setDataPosition(RandomAccessFile file, long dataPositionPointer) throws IOException { + this.dataPosition = dataPositionPointer; + + file.seek(DATA_START_HEADER_LOCATION); + file.writeLong(dataPositionPointer); + } + + int getVersion() { + return this.databaseVersion; + } + + void setVersion(int versionNumber) { + try { + setVersionNumber(this.file, versionNumber); + } catch (IOException e) { + this.logger.error("Unable to set the version number", e); + } + } + + + /** + * Returns the record to which the target file pointer belongs - meaning the specified location in the file is part + * of the record data of the RecordHeader which is returned. Returns null if the location is not part of a record. 
+ * (O(n) mem accesses) + */ + private final Metadata index_getMetaDataFromData(long targetFp) { + Iterator iterator = this.memoryIndex.values().iterator(); + + while (iterator.hasNext()) { + Metadata next = iterator.next(); + if (targetFp >= next.dataPointer && targetFp < next.dataPointer + next.dataCapacity) { + return next; + } + } + return null; + } + + + /** + * Ensure index capacity. This operation makes sure the INDEX REGION is large enough to accommodate additional entries. + */ + private final void ensureIndexCapacity(RandomAccessFile file) throws IOException { + int numberOfRecords = this.memoryIndex.size(); // because we are zero indexed, this is ALSO the index where the record will START + int newNumberOfRecords = numberOfRecords + 1; // +1 because this is where that index will END (the start of the NEXT one) + long endIndexPointer = Metadata.getMetaDataPointer(newNumberOfRecords); + + // just set the data position to the end of the file, since we don't have any data yet. + if (endIndexPointer > file.length() && numberOfRecords == 0) { + file.setLength(endIndexPointer); + setDataPosition(file, endIndexPointer); + return; + } + + // now we have to check, is there room for just 1 more entry? + long readDataPosition = this.dataPosition; + if (endIndexPointer < readDataPosition) { + // we have room for this entry. + return; + } + + + // otherwise, we have to grow our index. + Metadata first; + // "intelligent" move strategy. + // we should increase by some weight (ie: .5) would increase the number of allocated + // record headers by 50%, instead of just incrementing them by one at a time. + newNumberOfRecords = newNumberOfRecords + (int) (numberOfRecords * this.weight); + endIndexPointer = Metadata.getMetaDataPointer(newNumberOfRecords); + + // sometimes the endIndexPointer is in the middle of data, so we cannot move a record to where + // data already exists, we have to move it to the end. Since we GUARANTEE that there is never "free space" at the + // end of a file, this is ok + endIndexPointer = Math.max(endIndexPointer, file.length()); + + // we know that the start of the NEW data position has to be here. + setDataPosition(file, endIndexPointer); + + long writeDataPosition = endIndexPointer; + + // if we only have ONE record left, and we move it to the end, then no reason to keep looking for records. 
+ while (endIndexPointer > readDataPosition && numberOfRecords > 0) { + // this is the FIRST record that is in our data section + first = index_getMetaDataFromData(readDataPosition); + if (first == null) { + //nothing is here, so keep checking + readDataPosition += Metadata.INDEX_ENTRY_LENGTH; + continue; + } + +// System.err.println("\nMoving record: " + first.indexPosition + " -> " + writeDataPosition); + first.moveData(file, writeDataPosition); + + int dataCapacity = first.dataCapacity; + readDataPosition += dataCapacity; + writeDataPosition += dataCapacity; + numberOfRecords--; + } + } +} diff --git a/Dorkbox-Util/test/dorkbox/util/StorageTest.java b/Dorkbox-Util/test/dorkbox/util/StorageTest.java index 78f6aa1..1eb6b00 100644 --- a/Dorkbox-Util/test/dorkbox/util/StorageTest.java +++ b/Dorkbox-Util/test/dorkbox/util/StorageTest.java @@ -1,64 +1,438 @@ package dorkbox.util; - import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; +import java.io.OptionalDataException; import java.util.Arrays; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; import org.junit.Test; +import dorkbox.util.storage.Storage; + +/** + * Simple test class for the RecordsFile example. To run the test, set you CLASSPATH and then type + * "java hamner.dbtest.TestRecords" + */ public class StorageTest { + private static final String TEST_DB = "sampleFile.records"; + + static void log(String s) { + System.err.println(s); + } + + @Before + public void deleteDB() { + Storage.delete(new File(TEST_DB)); + } + + @After + public void delete2DB() { + Storage.delete(new File(TEST_DB)); + } + + @Test - public void storageTest() throws IOException { - File tempFile = FileUtil.tempFile("storageTest"); - tempFile.deleteOnExit(); + public void testCreateDB() throws IOException { + Storage storage = Storage.open(TEST_DB); - Data data = new Data(); - Storage storage = Storage.load(tempFile, data); - storage.setSaveDelay(0); + int numberOfRecords1 = storage.size(); + long size1 = storage.getFileSize(); - if (data.bytes != null) { - fail("storage has data when it shouldn't"); - } + Assert.assertEquals("count is not correct", numberOfRecords1, 0); + Assert.assertEquals("size is not correct", size1, 208L); // NOTE this will change based on the data size added! - makeData(data); - storage.save(); + Storage.close(storage); + + storage = Storage.open(TEST_DB); + int numberOfRecords2 = storage.size(); + long size2 = storage.getFileSize(); + + Assert.assertEquals("Record count is not the same", numberOfRecords1, numberOfRecords2); + Assert.assertEquals("size is not the same", size1, size2); + + Storage.close(storage); + } - Data data2 = new Data(); - storage.load(data2); + @Test + public void testAddAsOne() throws IOException, ClassNotFoundException { + int total = 100; - if (!data.equals(data2)) { - fail("storage test not equal"); - } + try { + Storage storage = Storage.open(TEST_DB); + for (int i=0;i
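
Appended for reference: a minimal, hedged usage sketch of the key/value storage API this patch introduces. It only uses calls whose signatures appear in the new dorkbox.util.storage.Storage class (open, setSaveDelay, save, get, saveNow, close); the package name, example class name, the file name "settings.db", and the key/value shown are illustrative assumptions, not part of the patch.

    package example;                                            // hypothetical package, for illustration only

    import dorkbox.util.storage.Storage;

    public class StorageUsageExample {
        public static void main(String[] args) {
            // Open (or create) the backing file. open() reference-counts repeated opens of the same file.
            Storage storage = Storage.open("settings.db");      // hypothetical file name

            // Flush to disk roughly 3 seconds after the last change, to avoid thrashing the disk.
            storage.setSaveDelay(3000L);

            // Queue a key/value write; the delayed writer persists it on the timer thread.
            storage.save("username", "dorkbox");                 // illustrative key/value

            // Reads check the pending-write queue first, then the weak-reference cache, then disk.
            String username = storage.get("username");
            System.out.println("read back: " + username);

            // saveNow() forces all queued writes to disk immediately (runs on the calling thread).
            storage.saveNow();

            // Release this reference; the file is closed when the last reference is released.
            Storage.close(storage);
        }
    }

A small design note, as reflected in the patch itself: save() defers the actual disk write to the DelayTimer thread, while saveNow() and the read/size paths trigger the flush on the calling thread, so readers always see either queued or persisted state.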