Moved Serializers to their own project

This commit is contained in:
Robinson 2021-01-15 00:46:48 +01:00
parent 654e6ca584
commit 41f060dede
18 changed files with 0 additions and 3405 deletions

View File

@ -1,294 +0,0 @@
/*
* Copyright 2010 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.util.serialization;
import java.math.BigInteger;
import org.bouncycastle.asn1.ASN1ObjectIdentifier;
import org.bouncycastle.asn1.x9.X9ECParameters;
import org.bouncycastle.crypto.ec.CustomNamedCurves;
import org.bouncycastle.crypto.params.ECDomainParameters;
import org.bouncycastle.crypto.params.ECPrivateKeyParameters;
import org.bouncycastle.math.ec.ECCurve;
import org.bouncycastle.math.ec.ECPoint;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.KryoException;
import com.esotericsoftware.kryo.Serializer;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
import com.esotericsoftware.reflectasm.FieldAccess;
/**
 * Kryo serializer for Bouncy Castle {@link ECPrivateKeyParameters}.
 * <p>
 * Wire format: the curve (by well-known name, by OID, or fully serialized), then the domain
 * parameters N and G, then the private value D. {@link BigInteger} values are written as
 * length-prefixed (variable-length int) big-endian byte arrays via {@link BigInteger#toByteArray()}.
 */
public
class EccPrivateKeySerializer extends Serializer<ECPrivateKeyParameters> {
    // ReflectASM accessor used to restore the coordinate-system field of a fully-deserialized
    // curve, since ECCurve exposes no public setter for it.
    // NOTE(review): depends on Bouncy Castle's internal field name "coord" -- fragile across BC versions.
    private static final FieldAccess ecCurveAccess = FieldAccess.get(ECCurve.class);
    private static final int ecCoordIndex = ecCurveAccess.getIndex("coord");

    // discriminator tags describing how the curve itself was encoded
    private static final byte usesName = (byte) 1;
    private static final byte usesOid = (byte) 2;
    private static final byte usesCurve = (byte) 3;

    /**
     * Writes a private key: curve, then N, then the base point G, then the private value D.
     */
    public static
    void write(Output output, ECPrivateKeyParameters key) throws KryoException {
        ECDomainParameters parameters = key.getParameters();
        ECCurve curve = parameters.getCurve();

        EccPrivateKeySerializer.serializeCurve(output, curve);

        // N, length-prefixed
        byte[] bytes = parameters.getN()
                                 .toByteArray();
        output.writeInt(bytes.length, true);
        output.writeBytes(bytes, 0, bytes.length);

        // base point G
        serializeECPoint(parameters.getG(), output);

        // private value D, length-prefixed
        bytes = key.getD()
                   .toByteArray();
        output.writeInt(bytes.length, true);
        output.writeBytes(bytes, 0, bytes.length);
    }

    /**
     * Reads a private key previously written by {@link #write(Output, ECPrivateKeyParameters)}.
     */
    public static
    ECPrivateKeyParameters read(Input input) throws KryoException {
        byte[] bytes;
        int length;

        ECCurve curve = EccPrivateKeySerializer.deserializeCurve(input);

        // N
        length = input.readInt(true);
        bytes = new byte[length];
        input.readBytes(bytes, 0, length);
        BigInteger n = new BigInteger(bytes);

        // G (uncompressed point encoding, decoded against the curve)
        length = input.readInt(true);
        bytes = new byte[length];
        input.readBytes(bytes, 0, length);
        ECPoint g = curve.decodePoint(bytes);

        // D
        length = input.readInt(true);
        bytes = new byte[length];
        input.readBytes(bytes, 0, length);
        BigInteger D = new BigInteger(bytes);

        ECDomainParameters ecDomainParameters = new ECDomainParameters(curve, g, n);
        return new ECPrivateKeyParameters(D, ecDomainParameters);
    }

    /**
     * Serializes a curve, preferring compact representations: a well-known curve name first,
     * then its OID, and only as a last resort the full curve definition (prime-field only).
     */
    static
    void serializeCurve(Output output, ECCurve curve) throws KryoException {
        // save out if it's a NAMED curve, or a UN-NAMED curve. If it is named, we can do less work.
        String curveName = curve.getClass()
                                .getSimpleName();
        if (CustomNamedCurves.getByName(curveName) != null) {
            // we use the name instead of serializing the full curve
            output.writeInt(usesName, true);
            output.writeString(curveName);
            return;
        }
        else if (curveName.endsWith("Curve")) {
            String cleanedName = curveName.substring(0, curveName.indexOf("Curve"));
            if (!cleanedName.isEmpty()) {
                ASN1ObjectIdentifier oid = CustomNamedCurves.getOID(cleanedName);
                if (oid != null) {
                    // we use the OID (instead of serializing the entire curve)
                    output.writeInt(usesOid, true);
                    curveName = oid.getId();
                    output.writeString(curveName);
                    return;
                }
            }
        }

        // we have to serialize the ENTIRE curve.
        // FIX: a discriminator tag was previously never written on this path, although
        // deserializeCurve() always reads one first -- the first length prefix was consumed as the
        // tag and the stream mis-aligned. Write the tag explicitly so the reader stays in sync.
        output.writeInt(usesCurve, true);

        // save out the curve info
        BigInteger a = curve.getA()
                            .toBigInteger();
        BigInteger b = curve.getB()
                            .toBigInteger();
        BigInteger order = curve.getOrder();
        BigInteger cofactor = curve.getCofactor();
        BigInteger q = curve.getField()
                            .getCharacteristic();

        byte[] bytes;

        bytes = a.toByteArray();
        output.writeInt(bytes.length, true);
        output.writeBytes(bytes, 0, bytes.length);

        bytes = b.toByteArray();
        output.writeInt(bytes.length, true);
        output.writeBytes(bytes, 0, bytes.length);

        bytes = order.toByteArray();
        output.writeInt(bytes.length, true);
        output.writeBytes(bytes, 0, bytes.length);

        bytes = cofactor.toByteArray();
        output.writeInt(bytes.length, true);
        output.writeBytes(bytes, 0, bytes.length);

        bytes = q.toByteArray();
        output.writeInt(bytes.length, true);
        output.writeBytes(bytes, 0, bytes.length);

        // coordinate system
        output.writeInt(curve.getCoordinateSystem(), true);
    }

    /**
     * Reads a curve written by {@link #serializeCurve(Output, ECCurve)}.
     */
    static
    ECCurve deserializeCurve(Input input) throws KryoException {
        int serializationType = input.readInt(true);

        // lookup via name
        if (serializationType == usesName) {
            String curveName = input.readString();
            X9ECParameters x9Curve = CustomNamedCurves.getByName(curveName);
            return x9Curve.getCurve();
        }

        // lookup via OID
        if (serializationType == usesOid) {
            String oid = input.readString();
            X9ECParameters x9Curve = CustomNamedCurves.getByOID(new ASN1ObjectIdentifier(oid));
            return x9Curve.getCurve();
        }

        // otherwise (usesCurve tag) the entire curve definition follows.
        byte[] bytes;
        int length;

        length = input.readInt(true);
        bytes = new byte[length];
        input.readBytes(bytes, 0, length);
        BigInteger a = new BigInteger(bytes);

        length = input.readInt(true);
        bytes = new byte[length];
        input.readBytes(bytes, 0, length);
        BigInteger b = new BigInteger(bytes);

        length = input.readInt(true);
        bytes = new byte[length];
        input.readBytes(bytes, 0, length);
        BigInteger order = new BigInteger(bytes);

        length = input.readInt(true);
        bytes = new byte[length];
        input.readBytes(bytes, 0, length);
        BigInteger cofactor = new BigInteger(bytes);

        length = input.readInt(true);
        bytes = new byte[length];
        input.readBytes(bytes, 0, length);
        BigInteger q = new BigInteger(bytes);

        // coord system
        int coordinateSystem = input.readInt(true);

        ECCurve curve = new ECCurve.Fp(q, a, b, order, cofactor);
        // restore the coordinate system via reflection (no public setter exists)
        ecCurveAccess.setInt(curve, ecCoordIndex, coordinateSystem);
        return curve;
    }

    /**
     * Writes a point as a length-prefixed uncompressed encoding (0x04 || X || Y).
     *
     * @throws KryoException if the point is at infinity, which has no representation in this
     *         encoding (every reader expects a length-prefixed value).
     */
    static
    void serializeECPoint(ECPoint point, Output output) throws KryoException {
        if (point.isInfinity()) {
            // FIX: previously this silently wrote NOTHING for infinity, which corrupted the
            // stream because readers always expect a length prefix. Fail fast instead.
            throw new KryoException("Unable to serialize a point at infinity");
        }

        ECPoint normed = point.normalize();
        byte[] X = normed.getXCoord()
                         .getEncoded();
        byte[] Y = normed.getYCoord()
                         .getEncoded();

        int length = 1 + X.length + Y.length;
        output.writeInt(length, true);
        output.write(0x04);  // uncompressed-point marker
        output.write(X);
        output.write(Y);
    }

    @Override
    public
    void write(Kryo kryo, Output output, ECPrivateKeyParameters key) throws KryoException {
        write(output, key);
    }

    @SuppressWarnings("rawtypes")
    @Override
    public
    ECPrivateKeyParameters read(Kryo kryo, Input input, Class type) throws KryoException {
        return read(input);
    }
}

View File

@ -1,111 +0,0 @@
/*
* Copyright 2010 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.util.serialization;
import java.math.BigInteger;
import org.bouncycastle.crypto.params.ECDomainParameters;
import org.bouncycastle.crypto.params.ECPublicKeyParameters;
import org.bouncycastle.math.ec.ECCurve;
import org.bouncycastle.math.ec.ECPoint;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.KryoException;
import com.esotericsoftware.kryo.Serializer;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
/**
 * Only public keys are ever sent across the wire.
 */
public
class EccPublicKeySerializer extends Serializer<ECPublicKeyParameters> {

    /** Writes the key's domain parameters (curve, N, G) followed by the public point Q. */
    public static
    void write(Output output, ECPublicKeyParameters key) throws KryoException {
        ECDomainParameters domain = key.getParameters();

        // curve first, so read() can rebuild the domain parameters
        EccPrivateKeySerializer.serializeCurve(output, domain.getCurve());

        // N as a length-prefixed big-endian byte array
        byte[] nBytes = domain.getN()
                              .toByteArray();
        output.writeInt(nBytes.length, true);
        output.writeBytes(nBytes, 0, nBytes.length);

        // base point G, then the public point Q
        EccPrivateKeySerializer.serializeECPoint(domain.getG(), output);
        EccPrivateKeySerializer.serializeECPoint(key.getQ(), output);
    }

    /** Reads a public key previously written by {@link #write(Output, ECPublicKeyParameters)}. */
    public static
    ECPublicKeyParameters read(Input input) throws KryoException {
        ECCurve curve = EccPrivateKeySerializer.deserializeCurve(input);

        // N
        int nLength = input.readInt(true);
        byte[] nBytes = new byte[nLength];
        input.readBytes(nBytes, 0, nLength);
        BigInteger n = new BigInteger(nBytes);

        // G
        int gLength = input.readInt(true);
        byte[] gBytes = new byte[gLength];
        input.readBytes(gBytes, 0, gLength);
        ECPoint g = curve.decodePoint(gBytes);

        ECDomainParameters domain = new ECDomainParameters(curve, g, n);

        // Q
        int qLength = input.readInt(true);
        byte[] qBytes = new byte[qLength];
        input.readBytes(qBytes, 0, qLength);
        ECPoint q = curve.decodePoint(qBytes);

        return new ECPublicKeyParameters(q, domain);
    }

    @Override
    public
    void write(Kryo kryo, Output output, ECPublicKeyParameters key) throws KryoException {
        write(output, key);
    }

    @SuppressWarnings("rawtypes")
    @Override
    public
    ECPublicKeyParameters read(Kryo kryo, Input input, Class type) throws KryoException {
        return read(input);
    }
}

View File

@ -1,28 +0,0 @@
package dorkbox.util.serialization;
import java.io.File;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Serializer;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
/**
 * Serialize the path of a file instead of the File object
 */
public
class FileSerializer extends Serializer<File> {

    @Override
    public
    void write(Kryo kryo, Output output, File file) {
        // only the path crosses the wire; the File handle itself carries no other state worth keeping
        output.writeString(file.getPath());
    }

    @Override
    public
    File read(final Kryo kryo, final Input input, final Class<? extends File> type) {
        // rebuild the File straight from the transmitted path
        return new File(input.readString());
    }
}

View File

@ -1,76 +0,0 @@
/*
* Copyright 2010 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.util.serialization;
import org.bouncycastle.crypto.params.IESParameters;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Serializer;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
/**
 * Only public keys are ever sent across the wire.
 */
public
class IesParametersSerializer extends Serializer<IESParameters> {

    @Override
    public
    void write(Kryo kryo, Output output, IESParameters key) {
        // derivation vector, encoding vector, then the MAC key size
        writeByteArray(output, key.getDerivationV());
        writeByteArray(output, key.getEncodingV());
        output.writeInt(key.getMacKeySize(), true);
    }

    @SuppressWarnings("rawtypes")
    @Override
    public
    IESParameters read(Kryo kryo, Input input, Class type) {
        byte[] derivation = readByteArray(input);
        byte[] encoding = readByteArray(input);
        int macKeySize = input.readInt(true);
        return new IESParameters(derivation, encoding, macKeySize);
    }

    // writes a byte array prefixed by its length as a variable-length int
    private static
    void writeByteArray(Output output, byte[] bytes) {
        output.writeInt(bytes.length, true);
        output.writeBytes(bytes, 0, bytes.length);
    }

    // reads a byte array written by writeByteArray()
    private static
    byte[] readByteArray(Input input) {
        int length = input.readInt(true);
        byte[] bytes = new byte[length];
        input.readBytes(bytes, 0, length);
        return bytes;
    }
}

View File

@ -1,83 +0,0 @@
/*
* Copyright 2010 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.util.serialization;
import org.bouncycastle.crypto.params.IESWithCipherParameters;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Serializer;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
/**
 * Only public keys are ever sent across the wire.
 */
public
class IesWithCipherParametersSerializer extends Serializer<IESWithCipherParameters> {

    @Override
    public
    void write(Kryo kryo, Output output, IESWithCipherParameters key) {
        // derivation vector, encoding vector, MAC key size, then cipher key size
        byte[] derivation = key.getDerivationV();
        output.writeInt(derivation.length, true);
        output.writeBytes(derivation, 0, derivation.length);

        byte[] encoding = key.getEncodingV();
        output.writeInt(encoding.length, true);
        output.writeBytes(encoding, 0, encoding.length);

        output.writeInt(key.getMacKeySize(), true);
        output.writeInt(key.getCipherKeySize(), true);
    }

    @SuppressWarnings("rawtypes")
    @Override
    public
    IESWithCipherParameters read(Kryo kryo, Input input, Class type) {
        // read back in the exact order written above
        int derivationLength = input.readInt(true);
        byte[] derivation = new byte[derivationLength];
        input.readBytes(derivation, 0, derivationLength);

        int encodingLength = input.readInt(true);
        byte[] encoding = new byte[encodingLength];
        input.readBytes(encoding, 0, encodingLength);

        int macKeySize = input.readInt(true);
        int cipherKeySize = input.readInt(true);

        return new IESWithCipherParameters(derivation, encoding, macKeySize, cipherKeySize);
    }
}

View File

@ -1,167 +0,0 @@
/*
* Copyright 2010 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.util.serialization;
import java.math.BigInteger;
import org.bouncycastle.crypto.params.RSAPrivateCrtKeyParameters;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Serializer;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
/**
 * Kryo serializer for Bouncy Castle {@link RSAPrivateCrtKeyParameters}.
 * <p>
 * Each CRT component is written as a length-prefixed (variable-length int) big-endian byte array
 * produced by {@link BigInteger#toByteArray()}. The write order and the read order below MUST
 * stay identical: dP, dQ, exponent, modulus, p, publicExponent, q, qInv.
 */
public
class RsaPrivateKeySerializer extends Serializer<RSAPrivateCrtKeyParameters> {

    @Override
    public
    void write(Kryo kryo, Output output, RSAPrivateCrtKeyParameters key) {
        // order must match read() exactly
        writeBigInteger(output, key.getDP());
        writeBigInteger(output, key.getDQ());
        writeBigInteger(output, key.getExponent());
        writeBigInteger(output, key.getModulus());
        writeBigInteger(output, key.getP());
        writeBigInteger(output, key.getPublicExponent());
        writeBigInteger(output, key.getQ());
        writeBigInteger(output, key.getQInv());
    }

    @SuppressWarnings("rawtypes")
    @Override
    public
    RSAPrivateCrtKeyParameters read(Kryo kryo, Input input, Class type) {
        // order must match write() exactly
        BigInteger DP = readBigInteger(input);
        BigInteger DQ = readBigInteger(input);
        BigInteger exponent = readBigInteger(input);
        BigInteger modulus = readBigInteger(input);
        BigInteger P = readBigInteger(input);
        BigInteger publicExponent = readBigInteger(input);
        BigInteger q = readBigInteger(input);
        BigInteger qInv = readBigInteger(input);

        return new RSAPrivateCrtKeyParameters(modulus, publicExponent, exponent, P, q, DP, DQ, qInv);
    }

    // writes one BigInteger as a length-prefixed big-endian byte array
    private static
    void writeBigInteger(Output output, BigInteger value) {
        byte[] bytes = value.toByteArray();
        output.writeInt(bytes.length, true);
        output.writeBytes(bytes, 0, bytes.length);
    }

    // reads one BigInteger written by writeBigInteger()
    private static
    BigInteger readBigInteger(Input input) {
        int length = input.readInt(true);
        byte[] bytes = new byte[length];
        input.readBytes(bytes, 0, length);
        return new BigInteger(bytes);
    }
}

View File

@ -1,77 +0,0 @@
/*
* Copyright 2010 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.util.serialization;
import java.math.BigInteger;
import org.bouncycastle.crypto.params.RSAKeyParameters;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Serializer;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
/**
 * Only public keys are ever sent across the wire.
 */
public
class RsaPublicKeySerializer extends Serializer<RSAKeyParameters> {

    @Override
    public
    void write(Kryo kryo, Output output, RSAKeyParameters key) {
        // modulus then exponent, each as a length-prefixed big-endian byte array
        byte[] modulusBytes = key.getModulus()
                                 .toByteArray();
        output.writeInt(modulusBytes.length, true);
        output.writeBytes(modulusBytes, 0, modulusBytes.length);

        byte[] exponentBytes = key.getExponent()
                                  .toByteArray();
        output.writeInt(exponentBytes.length, true);
        output.writeBytes(exponentBytes, 0, exponentBytes.length);
    }

    @SuppressWarnings("rawtypes")
    @Override
    public
    RSAKeyParameters read(Kryo kryo, Input input, Class type) {
        int modulusLength = input.readInt(true);
        byte[] modulusBytes = new byte[modulusLength];
        input.readBytes(modulusBytes, 0, modulusLength);

        int exponentLength = input.readInt(true);
        byte[] exponentBytes = new byte[exponentLength];
        input.readBytes(exponentBytes, 0, exponentLength);

        // 'false' marks this as a public key
        return new RSAKeyParameters(false, new BigInteger(modulusBytes), new BigInteger(exponentBytes));
    }
}

View File

@ -1,81 +0,0 @@
package dorkbox.util.serialization;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Registration;
import com.esotericsoftware.kryo.Serializer;
import de.javakaffee.kryoserializers.UnmodifiableCollectionsSerializer;
public
class SerializationDefaults {
/**
 * Allows for the kryo registration of sensible defaults in a common, well used way.
 * <p>
 * NOTE: Kryo assigns registration IDs in call order, so the ORDER of the register() calls below
 * is part of the wire format -- both endpoints must register in exactly this order.
 */
public static
void register(Kryo kryo) {
// these are registered using the default serializers. We don't customize these, because we don't care about it.
kryo.register(String.class);
kryo.register(String[].class);
kryo.register(int[].class);
kryo.register(short[].class);
kryo.register(float[].class);
kryo.register(double[].class);
kryo.register(long[].class);
kryo.register(byte[].class);
kryo.register(char[].class);
kryo.register(boolean[].class);
kryo.register(Integer[].class);
kryo.register(Short[].class);
kryo.register(Float[].class);
kryo.register(Double[].class);
kryo.register(Long[].class);
kryo.register(Byte[].class);
kryo.register(Character[].class);
kryo.register(Boolean[].class);
kryo.register(Object[].class);
kryo.register(Object[][].class);
kryo.register(Class.class);
kryo.register(Exception.class);
kryo.register(IOException.class);
kryo.register(RuntimeException.class);
kryo.register(NullPointerException.class);
// necessary for the transport of exceptions.
kryo.register(StackTraceElement.class);
kryo.register(StackTraceElement[].class);
kryo.register(ArrayList.class);
kryo.register(HashMap.class);
kryo.register(HashSet.class);
// the concrete (private) JDK classes behind the Collections.emptyXxx() factories
kryo.register(Collections.emptyList().getClass());
kryo.register(Collections.emptySet().getClass());
kryo.register(Collections.emptyMap().getClass());
kryo.register(Collections.emptyNavigableSet().getClass());
kryo.register(Collections.emptyNavigableMap().getClass());
// hacky way to register unmodifiable serializers: UnmodifiableCollectionsSerializer registers
// against the Kryo instance it is handed, so this shim forwards each registration to the real
// "kryo" (captured from the enclosing method parameter).
Kryo kryoHack = new Kryo() {
@Override
public
Registration register(final Class type, final Serializer serializer) {
kryo.register(type, serializer);
// NOTE(review): returns null rather than a Registration; presumably the caller ignores the
// return value -- verify against UnmodifiableCollectionsSerializer before relying on this.
return null;
}
};
UnmodifiableCollectionsSerializer.registerSerializers(kryoHack);
}
}

View File

@ -1,100 +0,0 @@
/*
* Copyright 2018 dorkbox, llc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.util.serialization;
import java.io.IOException;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.KryoException;
import com.esotericsoftware.kryo.Serializer;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
public
interface SerializationManager<IO> {
    /**
     * Registers the class using the lowest, next available integer ID and the {@link Kryo#getDefaultSerializer(Class) default serializer}.
     * If the class is already registered, the existing entry is updated with the new serializer.
     * <p>
     * Registering a primitive also affects the corresponding primitive wrapper.
     * <p>
     * Because the ID assigned is affected by the IDs registered before it, the order classes are registered is important when using this
     * method. The order must be the same at deserialization as it was for serialization.
     *
     * @return this instance, for call chaining
     */
    <T> SerializationManager<IO> register(Class<T> clazz);

    /**
     * Registers the class using the specified ID. If the ID is already in use by the same type, the old entry is overwritten. If the ID
     * is already in use by a different type, a {@link KryoException} is thrown.
     * <p>
     * Registering a primitive also affects the corresponding primitive wrapper.
     * <p>
     * IDs must be the same at deserialization as they were for serialization.
     *
     * @param id Must be >= 0. Smaller IDs are serialized more efficiently. IDs 0-8 are used by default for primitive types and String, but
     *           these IDs can be repurposed.
     *
     * @return this instance, for call chaining
     */
    <T> SerializationManager<IO> register(Class<T> clazz, int id);

    /**
     * Registers the class using the lowest, next available integer ID and the specified serializer. If the class is already registered,
     * the existing entry is updated with the new serializer.
     * <p>
     * Registering a primitive also affects the corresponding primitive wrapper.
     * <p>
     * Because the ID assigned is affected by the IDs registered before it, the order classes are registered is important when using this
     * method. The order must be the same at deserialization as it was for serialization.
     *
     * @return this instance, for call chaining
     */
    <T> SerializationManager<IO> register(Class<T> clazz, Serializer<T> serializer);

    /**
     * Registers the class using the specified ID and serializer. If the ID is already in use by the same type, the old entry is
     * overwritten. If the ID is already in use by a different type, a {@link KryoException} is thrown.
     * <p>
     * Registering a primitive also affects the corresponding primitive wrapper.
     * <p>
     * IDs must be the same at deserialization as they were for serialization.
     *
     * @param id Must be >= 0. Smaller IDs are serialized more efficiently. IDs 0-8 are used by default for primitive types and String, but
     *           these IDs can be repurposed.
     *
     * @return this instance, for call chaining
     */
    <T> SerializationManager<IO> register(Class<T> clazz, Serializer<T> serializer, int id);

    /**
     * Waits until a kryo is available to write, using CAS operations to prevent having to synchronize.
     * <p/>
     * There is a small speed penalty if there were no kryo's available to use.
     */
    void write(IO buffer, Object message) throws IOException;

    /**
     * Reads an object from the buffer.
     *
     * @param length should ALWAYS be the length of the expected object!
     */
    Object read(IO buffer, int length) throws IOException;

    /**
     * Writes the class and object using an available kryo instance
     */
    void writeFullClassAndObject(Output output, Object value) throws IOException;

    /**
     * Returns a class read from the input
     */
    Object readFullClassAndObject(final Input input) throws IOException;
}

View File

@ -1,100 +0,0 @@
/*
* Copyright 2014 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.util.storage;
import java.io.IOException;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Serializer;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
import com.esotericsoftware.minlog.Log;
import dorkbox.util.bytes.ByteBuffer2;
import dorkbox.util.serialization.SerializationDefaults;
import dorkbox.util.serialization.SerializationManager;
/**
 * Default {@link SerializationManager} for the disk storage: delegates all registration and
 * (de)serialization to a single private Kryo instance pre-loaded with the common defaults.
 * <p>
 * NOTE(review): the single Kryo instance is shared without any locking; confirm callers
 * serialize access, since Kryo instances are not generally safe for concurrent use.
 */
class DefaultStorageSerializationManager implements SerializationManager<ByteBuffer2> {
// anonymous subclass only so the instance-initializer runs once at construction.
// NOTE(review): Log.set() configures minlog logging GLOBALLY, not just for this instance.
private Kryo kryo = new Kryo() {{
// we don't want logging from Kryo...
Log.set(Log.LEVEL_ERROR);
}};
public
DefaultStorageSerializationManager() {
// pre-register the common JDK types so registration IDs are stable across instances
SerializationDefaults.register(kryo);
}
@Override
public
<T> SerializationManager<ByteBuffer2> register(final Class<T> clazz) {
// straight delegation to Kryo; returns this for chaining
kryo.register(clazz);
return this;
}
@Override
public
<T> SerializationManager<ByteBuffer2> register(final Class<T> clazz, final int id) {
kryo.register(clazz, id);
return this;
}
@Override
public
<T> SerializationManager<ByteBuffer2> register(final Class<T> clazz, final Serializer<T> serializer) {
kryo.register(clazz, serializer);
return this;
}
@Override
public
<T> SerializationManager<ByteBuffer2> register(final Class<T> type, final Serializer<T> serializer, final int id) {
kryo.register(type, serializer, id);
return this;
}
@Override
public
void write(final ByteBuffer2 buffer, final Object message) {
// NOTE(review): the no-arg Output has no backing buffer assigned; confirm this Kryo version
// permits writing to it (some versions throw until a buffer is set).
final Output output = new Output();
writeFullClassAndObject(output, message);
buffer.writeBytes(output.getBuffer());
}
@Override
public
Object read(final ByteBuffer2 buffer, final int length) throws IOException {
// NOTE(review): the no-arg Input has no backing buffer; getBuffer() may be null/empty here --
// verify this path is actually exercised. The 'length' parameter is ignored.
final Input input = new Input();
buffer.readBytes(input.getBuffer());
final Object o = readFullClassAndObject(input);
// advance the source buffer past the bytes consumed during deserialization
buffer.skip(input.position());
return o;
}
@Override
public
void writeFullClassAndObject(final Output output, final Object value) {
// writes the class registration followed by the object itself
kryo.writeClassAndObject(output, value);
}
@Override
public
Object readFullClassAndObject(final Input input) {
return kryo.readClassAndObject(input);
}
}

View File

@ -1,390 +0,0 @@
/*
* Copyright 2014 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.util.storage;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import org.slf4j.Logger;
import dorkbox.util.DelayTimer;
import dorkbox.util.serialization.SerializationManager;
/**
 * Nothing spectacular about this storage -- it allows for persistent storage of objects to disk.
 * <p/>
 * Be wary of opening the database file in different JVM instances. Even with file-locks, you can corrupt the data.
 */
@SuppressWarnings({"Convert2Diamond", "Convert2Lambda"})
class DiskStorage implements Storage {
// null if we are a read-only storage
private final DelayTimer timer;
// pending (not yet flushed) key->value actions. Must be volatile: the writer thread swaps the
// whole map under singleWriterLock while reader threads snapshot it via actionMapREF.
private volatile HashMap<StorageKey, Object> actionMap = new HashMap<StorageKey, Object>();
// dedicated monitor guarding the actionMap swap (a zero-length array is a cheap lock object)
private final Object singleWriterLock = new Object[0];
// Recommended for best performance while adhering to the "single writer principle". Must be static-final
private static final AtomicReferenceFieldUpdater<DiskStorage, HashMap> actionMapREF =
AtomicReferenceFieldUpdater.newUpdater(DiskStorage.class, HashMap.class, "actionMap");
// on-disk backing store that performs the actual reads/writes
private final StorageBase storage;
// reference count starting at 1 -- presumably for shared open handles; verify against open/close logic
private final AtomicInteger references = new AtomicInteger(1);
// guards every public operation; flipped to true at the end of the constructor
private final AtomicBoolean isOpen = new AtomicBoolean(false);
// save delay handed to the DelayTimer
private final long milliSeconds;
/**
 * Creates or opens a new database file.
 *
 * @param storageFile file backing this storage (passed through to StorageBase)
 * @param serializationManager performs (de)serialization for StorageBase
 * @param readOnly if true, no writer timer is created (timer stays null and nothing is flushed)
 * @param saveDelayInMilliseconds delay before pending actions are flushed by the timer
 * @param logger logger handed to StorageBase
 *
 * @throws IOException propagated from StorageBase when the file cannot be opened/created
 */
DiskStorage(File storageFile,
SerializationManager serializationManager,
final boolean readOnly,
final long saveDelayInMilliseconds,
final Logger logger) throws IOException {
this.storage = new StorageBase(storageFile, serializationManager, logger);
this.milliSeconds = saveDelayInMilliseconds;
if (readOnly) {
this.timer = null;
}
else {
// the timer's task swaps out the pending-action map and hands the old one to the backing store
this.timer = new DelayTimer("Storage Writer", false, new Runnable() {
@Override
public
void run() {
Map<StorageKey, Object> actions;
// synchronized is used here to ensure the "single writer principle", and make sure that ONLY one thread at a time can enter this
// section. Because of this, we can have unlimited reader threads all going at the same time, without contention.
synchronized (singleWriterLock) {
// do a fast swap on the actionMap.
actions = DiskStorage.this.actionMap;
DiskStorage.this.actionMap = new HashMap<StorageKey, Object>();
}
DiskStorage.this.storage.doActionThings(actions);
}
});
}
// only now is the storage considered usable by the public methods
this.isOpen.set(true);
}
/**
* Returns the number of objects in the database.
* <p/>
* SLOW because this must save all data to disk first!
*/
@Override
public final
int size() {
if (!this.isOpen.get()) {
throw new RuntimeException("Unable to act on closed storage");
}
// flush actions
// timer action runs on THIS thread, not timer thread
if (timer != null) {
this.timer.delay(0L);
}
return this.storage.size();
}
/**
* Checks if there is a object corresponding to the given key.
*/
@Override
public final
boolean contains(StorageKey key) {
if (!this.isOpen.get()) {
throw new RuntimeException("Unable to act on closed storage");
}
// access a snapshot of the actionMap (single-writer-principle)
final HashMap actionMap = actionMapREF.get(this);
// check if our pending actions has it, or if our storage index has it
return actionMap.containsKey(key) || this.storage.contains(key);
}
/**
* Reads a object using the specific key, and casts it to the expected class
*/
@Override
public final
<T> T get(StorageKey key) {
return get0(key);
}
/**
* Returns the saved data (or null) for the specified key. Also saves the data as default data.
*
* @param data If there is no object in the DB with the specified key, this value will be the default (and will be saved to the db)
*
* @return NULL if the saved data was the wrong type for the specified key.
*/
@Override
@SuppressWarnings("unchecked")
public
<T> T get(StorageKey key, T data) {
Object source = get0(key);
if (source == null) {
// returned was null, so we should save the default value
put(key, data);
return data;
}
else {
final Class<?> expectedClass = data.getClass();
final Class<?> savedCLass = source.getClass();
if (!expectedClass.isAssignableFrom(savedCLass)) {
String message = "Saved value type '" + savedCLass + "' is different than expected value '" + expectedClass + "'";
if (storage.logger != null) {
storage.logger.error(message);
}
else {
System.err.print(message);
}
return null;
}
}
return (T) source;
}
/**
* Reads a object from pending or from storage
*/
private
<T> T get0(StorageKey key) {
if (!this.isOpen.get()) {
throw new RuntimeException("Unable to act on closed storage");
}
// access a snapshot of the actionMap (single-writer-principle)
final HashMap actionMap = actionMapREF.get(this);
// if the object in is pending, we get it from there
Object object = actionMap.get(key);
if (object != null) {
@SuppressWarnings("unchecked")
T returnObject = (T) object;
return returnObject;
}
// not found, so we have to go find it on disk
return this.storage.get(key);
}
/**
* Saves the given data to storage with the associated key.
* <p/>
* Also will update existing data. If the new contents do not fit in the original space, then the update is handled by
* deleting the old data and adding the new.
*/
@Override
public final
void put(StorageKey key, Object object) {
if (!this.isOpen.get()) {
throw new RuntimeException("Unable to act on closed storage");
}
if (timer != null) {
// synchronized is used here to ensure the "single writer principle", and make sure that ONLY one thread at a time can enter this
// section. Because of this, we can have unlimited reader threads all going at the same time, without contention.
synchronized (singleWriterLock) {
// push action to map
actionMap.put(key, object);
}
// timer action runs on TIMER thread, not this thread
this.timer.delay(this.milliSeconds);
} else {
throw new RuntimeException("Unable to put on a read-only storage");
}
}
/**
* Deletes an object from storage.
*
* @return true if the delete was successful. False if there were problems deleting the data.
*/
@Override
public final
boolean delete(StorageKey key) {
if (!this.isOpen.get()) {
throw new RuntimeException("Unable to act on closed storage");
}
// timer action runs on THIS thread, not timer thread
if (timer != null) {
// flush to storage, so we know if there were errors deleting from disk
this.timer.delay(0L);
return this.storage.delete(key);
}
else {
throw new RuntimeException("Unable to delete on a read-only storage");
}
}
/**
* Closes and removes this storage from the storage system. This is the same as calling {@link StorageSystem#close(Storage)}
*/
@Override
public
void close() {
StorageSystem.close(this);
}
/**
* Closes the database and file.
*/
void closeFully() {
// timer action runs on THIS thread, not timer thread
if (timer != null) {
this.timer.delay(0L);
}
// have to "close" it after we run the timer!
this.isOpen.set(false);
this.storage.close();
}
/**
* @return the file that backs this storage
*/
@Override
public final
File getFile() {
return this.storage.getFile();
}
/**
* Gets the backing file size.
*
* @return -1 if there was an error
*/
@Override
public final
long getFileSize() {
// timer action runs on THIS thread, not timer thread
if (timer != null) {
this.timer.delay(0L);
}
return this.storage.getFileSize();
}
/**
* @return true if there are objects queued to be written?
*/
@Override
public final
boolean hasWriteWaiting() {
if (!this.isOpen.get()) {
throw new RuntimeException("Unable to act on closed storage");
}
//noinspection SimplifiableIfStatement
if (timer != null) {
return this.timer.isWaiting();
}
else {
return false;
}
}
/**
* @return the delay in milliseconds this will wait after the last action to flush the data to the disk
*/
@Override
public final
long getSaveDelay() {
return this.milliSeconds;
}
/**
* @return the version of data stored in the database
*/
@Override
public final
int getVersion() {
return this.storage.getVersion();
}
/**
* Sets the version of data stored in the database
*/
@Override
public final
void setVersion(int version) {
this.storage.setVersion(version);
}
void increaseReference() {
this.references.incrementAndGet();
}
/**
* return true when this is the last reference
*/
boolean decrementReference() {
return this.references.decrementAndGet() <= 0;
}
@Override
protected
Object clone() throws CloneNotSupportedException {
return super.clone();
}
/**
* Save the storage to disk, immediately.
* <p/>
* This will save ALL of the pending save actions to the file
*/
@Override
public final
void save() {
if (!this.isOpen.get()) {
throw new RuntimeException("Unable to act on closed storage");
}
// timer action runs on THIS thread, not timer thread
if (timer != null) {
this.timer.delay(0L);
} else {
throw new RuntimeException("Unable to save on a read-only storage");
}
}
}

View File

@ -1,200 +0,0 @@
/*
* Copyright 2014 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.util.storage;
import java.io.File;
import java.util.HashMap;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
/**
 * Storage that is in memory only (and is not persisted to disk)
 */
class MemoryStorage implements Storage {
    // must be volatile: read lock-free via storageREF by any number of reader threads
    @SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
    private volatile HashMap<StorageKey, Object> storage = new HashMap<StorageKey, Object>();

    private final Object singleWriterLock = new Object[0];

    // Recommended for best performance while adhering to the "single writer principle". Must be static-final
    private static final AtomicReferenceFieldUpdater<MemoryStorage, HashMap> storageREF =
            AtomicReferenceFieldUpdater.newUpdater(MemoryStorage.class, HashMap.class, "storage");

    // guarded by 'synchronized(this)' in getVersion/setVersion
    private int version;

    MemoryStorage() {}

    /**
     * Returns the number of objects in the database.
     */
    @Override
    public
    int size() {
        // access a snapshot of the storage (single-writer-principle)
        HashMap storage = storageREF.get(this);
        return storage.size();
    }

    /**
     * Checks if there is a object corresponding to the given key.
     */
    @Override
    public
    boolean contains(final StorageKey key) {
        // access a snapshot of the storage (single-writer-principle)
        HashMap storage = storageREF.get(this);
        return storage.containsKey(key);
    }

    /**
     * Reads a object using the specific key, and casts it to the expected class
     */
    @SuppressWarnings("unchecked")
    @Override
    public
    <T> T get(final StorageKey key) {
        // access a snapshot of the storage (single-writer-principle)
        HashMap storage = storageREF.get(this);
        return (T) storage.get(key);
    }

    /**
     * Returns the saved data for the specified key, or saves (and returns) the supplied
     * default when the key is absent.
     */
    @SuppressWarnings("unchecked")
    @Override
    public
    <T> T get(final StorageKey key, final T data) {
        // FIX: the default-value insert is a WRITE, so it must be serialized under the
        // singleWriterLock. Previously the map was mutated outside the lock, racing with
        // put()/delete() and breaking the single-writer principle this class documents.
        synchronized (singleWriterLock) {
            final Object o = storage.get(key);
            if (o == null) {
                storage.put(key, data);
                return data;
            }
            return (T) o;
        }
    }

    /**
     * Saves the given data to storage with the associated key.
     * <p/>
     * Also will update existing data. If the new contents do not fit in the original space, then the update is handled by
     * deleting the old data and adding the new.
     */
    @Override
    public
    void put(final StorageKey key, final Object object) {
        // synchronized is used here to ensure the "single writer principle", and make sure that ONLY one thread at a time can enter this
        // section. Because of this, we can have unlimited reader threads all going at the same time, without contention.
        synchronized (singleWriterLock) {
            storage.put(key, object);
        }
    }

    /**
     * Deletes an object from storage.
     *
     * @return true if the delete was successful. False if there were problems deleting the data.
     */
    @Override
    public
    boolean delete(final StorageKey key) {
        // synchronized is used here to ensure the "single writer principle", and make sure that ONLY one thread at a time can enter this
        // section. (The previously-redundant method-level 'synchronized' was removed; the
        // singleWriterLock alone serializes all writers, matching put().)
        synchronized (singleWriterLock) {
            storage.remove(key);
        }
        return true;
    }

    /**
     * @return null. There is no file that backs this storage
     */
    @Override
    public
    File getFile() {
        return null;
    }

    /**
     * Gets the backing file size.
     *
     * @return 0. There is no file that backs this storage
     */
    @Override
    public
    long getFileSize() {
        return 0;
    }

    /**
     * @return false. Writes to in-memory storage are immediate.
     */
    @Override
    public
    boolean hasWriteWaiting() {
        return false;
    }

    /**
     * @return 0. There is no file that backs this storage
     */
    @Override
    public
    long getSaveDelay() {
        return 0L;
    }

    /**
     * @return the version of data stored in the database
     */
    @Override
    public synchronized
    int getVersion() {
        return version;
    }

    /**
     * Sets the version of data stored in the database
     */
    @Override
    public synchronized
    void setVersion(final int version) {
        this.version = version;
    }

    /**
     * There is no file that backs this storage, so writes are immediate and saves do nothing
     */
    @Override
    public
    void save() {
        // no-op
    }

    /**
     * In-memory storage systems do not have a backing file, so there is nothing to close
     */
    @Override
    public
    void close() {
        StorageSystem.close(this);
    }
}

View File

@ -1,318 +0,0 @@
/*
* Copyright 2014 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.util.storage;
import java.io.ByteArrayOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.lang.ref.WeakReference;
import java.nio.channels.FileLock;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
import dorkbox.util.serialization.SerializationManager;
public
class Metadata {
    // The length of a key in the index.
    // SHA256 is 32 bytes long.
    private static final int KEY_SIZE = 32;

    // Number of bytes in the record header: long dataPointer (8) + int dataCapacity (4) + int dataCount (4).
    private static final int POINTER_INFO_SIZE = 16;

    // The total length of one index entry - the key length plus the record header length.
    static final int INDEX_ENTRY_LENGTH = KEY_SIZE + POINTER_INFO_SIZE;

    /**
     * This is the key to the index
     */
    final StorageKey key;

    /**
     * Indicates this header's position in the file index.
     */
    volatile int indexPosition;

    /**
     * File pointer to the first byte of record data (8 bytes).
     */
    volatile long dataPointer;

    /**
     * Actual number of bytes of data held in this record (4 bytes).
     */
    volatile int dataCount;

    /**
     * Number of bytes of data that this record can hold (4 bytes).
     */
    volatile int dataCapacity;

    /**
     * The object that has been registered to this key. This is for automatic saving of data (if it's changed)
     */
    volatile WeakReference<Object> objectReferenceCache;

    /**
     * Returns a file pointer in the index pointing to the first byte in the KEY located at the given index position.
     */
    static
    long getMetaDataPointer(int position) {
        return StorageBase.FILE_HEADERS_REGION_LENGTH + ((long) INDEX_ENTRY_LENGTH) * position;
    }

    /**
     * Returns a file pointer in the index pointing to the first byte in the RECORD pointer located at the given index
     * position.
     */
    private static
    long getDataPointer(int position) {
        return Metadata.getMetaDataPointer(position) + KEY_SIZE;
    }

    private
    Metadata(StorageKey key) {
        this.key = key;
    }

    /**
     * we don't know how much data there is until AFTER we write the data
     */
    Metadata(StorageKey key, int recordIndex, long dataPointer) {
        this(key, recordIndex, dataPointer, 0);
    }

    private
    Metadata(StorageKey key, int recordIndex, long dataPointer, int dataCapacity) {
        if (key.getBytes().length > KEY_SIZE) {
            // FIX: report the offending key length. The previous message printed
            // 'dataCapacity', which is unrelated to the key size being validated.
            throw new IllegalArgumentException("Bad record key size: " + key.getBytes().length);
        }
        this.key = key;
        this.indexPosition = recordIndex;
        this.dataPointer = dataPointer;

        // we don't always know the size!
        this.dataCapacity = dataCapacity;
        this.dataCount = dataCapacity;
    }

    @SuppressWarnings("unused")
    int getFreeSpace() {
        return this.dataCapacity - this.dataCount;
    }

    /**
     * Reads the Nth HEADER (key + metadata) from the index.
     *
     * @return null for an all-zero entry (padding at the end of the index region)
     */
    static
    Metadata readHeader(RandomAccessFile file, int position) throws IOException {
        byte[] buf = new byte[KEY_SIZE];
        long origHeaderKeyPointer = Metadata.getMetaDataPointer(position);

        // shared (read) lock over the entry while reading the key bytes
        FileLock lock = file.getChannel()
                            .lock(origHeaderKeyPointer, INDEX_ENTRY_LENGTH, true);
        file.seek(origHeaderKeyPointer);
        file.readFully(buf);
        lock.release();

        Metadata r = new Metadata(new StorageKey(buf));
        r.indexPosition = position;

        long recordHeaderPointer = Metadata.getDataPointer(position);
        // FIX: lock the pointer-info region that is actually read below. This previously
        // re-locked the key region (origHeaderKeyPointer, KEY_SIZE), leaving the bytes
        // read at recordHeaderPointer unprotected.
        lock = file.getChannel()
                   .lock(recordHeaderPointer, POINTER_INFO_SIZE, true);
        file.seek(recordHeaderPointer);
        r.dataPointer = file.readLong();
        r.dataCapacity = file.readInt();
        r.dataCount = file.readInt();
        lock.release();

        // an all-zero entry is index padding, not a real record
        if (r.dataPointer == 0L || r.dataCapacity == 0L || r.dataCount == 0L) {
            return null;
        }
        return r;
    }

    /**
     * Writes this entry's KEY bytes into its slot in the index.
     */
    void writeMetaDataInfo(RandomAccessFile file) throws IOException {
        long recordKeyPointer = Metadata.getMetaDataPointer(this.indexPosition);

        // exclusive (write) lock over the key region
        FileLock lock = file.getChannel()
                            .lock(recordKeyPointer, KEY_SIZE, false);
        file.seek(recordKeyPointer);
        file.write(this.key.getBytes());
        lock.release();
    }

    /**
     * Writes this entry's pointer info (dataPointer/dataCapacity/dataCount) into the index.
     */
    void writeDataInfo(RandomAccessFile file) throws IOException {
        long recordHeaderPointer = getDataPointer(this.indexPosition);

        // exclusive (write) lock over the pointer-info region
        FileLock lock = file.getChannel()
                            .lock(recordHeaderPointer, POINTER_INFO_SIZE, false);
        file.seek(recordHeaderPointer);
        file.writeLong(this.dataPointer);
        file.writeInt(this.dataCapacity);
        file.writeInt(this.dataCount);
        lock.release();
    }

    /**
     * Move a record to the new INDEX
     */
    void moveRecord(RandomAccessFile file, int newIndex) throws IOException {
        byte[] buf = new byte[KEY_SIZE];

        // read the key out of the old slot
        long origHeaderKeyPointer = Metadata.getMetaDataPointer(this.indexPosition);
        FileLock lock = file.getChannel()
                            .lock(origHeaderKeyPointer, INDEX_ENTRY_LENGTH, true);
        file.seek(origHeaderKeyPointer);
        file.readFully(buf);
        lock.release();

        // write it into the new slot
        long newHeaderKeyPointer = Metadata.getMetaDataPointer(newIndex);
        lock = file.getChannel()
                   .lock(newHeaderKeyPointer, INDEX_ENTRY_LENGTH, false);
        file.seek(newHeaderKeyPointer);
        file.write(buf);
        lock.release();

        // System.err.println("updating ptr: " + this.indexPosition + " -> " + newIndex + " @ " + newHeaderKeyPointer + "-" + (newHeaderKeyPointer+INDEX_ENTRY_LENGTH));
        this.indexPosition = newIndex;
        writeDataInfo(file);
    }

    /**
     * Move a record DATA to the new position, and update record header info
     */
    void moveData(RandomAccessFile file, long position) throws IOException {
        // now we move it to the end of the file.
        // we ALSO trim the free space off.
        byte[] data = readDataRaw(file);

        this.dataPointer = position;
        this.dataCapacity = this.dataCount;

        FileLock lock = file.getChannel()
                            .lock(position, this.dataCount, false);

        // update the file size
        file.setLength(position + this.dataCount);

        // System.err.print("moving data: " + this.indexPosition + " @ " + this.dataPointer + "-" + (this.dataPointer+data.length) + " -- ");
        // Sys.printArray(data, data.length, false, 0);

        // save the data
        file.seek(position);
        file.write(data);
        lock.release();

        // update header pointer info
        writeDataInfo(file);
    }

    /**
     * Reads the record data for the given record header.
     */
    private
    byte[] readDataRaw(RandomAccessFile file) throws IOException {
        byte[] buf = new byte[this.dataCount];
        // System.err.print("Reading data: " + this.indexPosition + " @ " + this.dataPointer + "-" + (this.dataPointer+this.dataCount) + " -- ");

        FileLock lock = file.getChannel()
                            .lock(this.dataPointer, this.dataCount, true);
        file.seek(this.dataPointer);
        file.readFully(buf);
        lock.release();
        // Sys.printArray(buf, buf.length, false, 0);

        return buf;
    }

    /**
     * Reads the record data for the given record header.
     */
    static
    <T> T readData(final SerializationManager serializationManager, final Input input) throws IOException {
        // this is to reset the internal buffer of 'input'
        input.setInputStream(input.getInputStream());

        @SuppressWarnings("unchecked")
        T readObject = (T) serializationManager.readFullClassAndObject(input);
        return readObject;
    }

    /**
     * Writes data to the end of the file (which is where the datapointer is at). This must be locked/released in calling methods!
     *
     * @return the number of serialized bytes written
     */
    static
    int writeData(final SerializationManager serializationManager,
                  final Object data,
                  final Output output) throws IOException {
        output.reset();
        serializationManager.writeFullClassAndObject(output, data);
        output.flush();

        return (int) output.total();
    }

    /**
     * Copies the raw serialized bytes into the record's data region.
     */
    void writeDataRaw(ByteArrayOutputStream byteArrayOutputStream, RandomAccessFile file) throws IOException {
        this.dataCount = byteArrayOutputStream.size();

        FileLock lock = file.getChannel()
                            .lock(this.dataPointer, this.dataCount, false);

        // intentionally NOT closed: closing this stream would close the shared file
        // descriptor and thereby the RandomAccessFile itself
        FileOutputStream out = new FileOutputStream(file.getFD());
        file.seek(this.dataPointer);
        byteArrayOutputStream.writeTo(out);
        lock.release();
    }

    @Override
    public
    String toString() {
        return "RecordHeader [dataPointer=" + this.dataPointer + ", dataCount=" + this.dataCount + ", dataCapacity=" + this.dataCapacity +
               ", indexPosition=" + this.indexPosition + "]";
    }
}

View File

@ -1,107 +0,0 @@
/*
* Copyright 2014 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.util.storage;
import java.io.File;
/**
 * A simple key-value store for serialized objects, optionally persisted to disk.
 */
@SuppressWarnings("unused")
public
interface Storage {
    /**
     * Returns the number of objects in the database.
     */
    int size();

    /**
     * Checks if there is a object corresponding to the given key.
     */
    boolean contains(StorageKey key);

    /**
     * Reads a object using the specific key, and casts it to the expected class
     */
    <T> T get(StorageKey key);

    /**
     * Returns the saved data for the specified key.
     *
     * @param key The key used to check if data already exists.
     * @param data This is the default value, and if there is no value with the key in the DB this default value will be saved.
     */
    <T> T get(StorageKey key, T data);

    /**
     * Saves the given data to storage with the associated key.
     * <p/>
     * Also will update existing data. If the new contents do not fit in the original space, then the update is handled by
     * deleting the old data and adding the new.
     */
    void put(StorageKey key, Object data);

    /**
     * Deletes an object from storage.
     *
     * @return true if the delete was successful. False if there were problems deleting the data.
     */
    boolean delete(StorageKey key);

    /**
     * @return the file that backs this storage, or null if the implementation is not file-backed
     */
    File getFile();

    /**
     * Gets the backing file size.
     *
     * @return -1 if there was an error
     */
    long getFileSize();

    /**
     * @return true if there are objects queued to be written
     */
    boolean hasWriteWaiting();

    /**
     * @return the delay in milliseconds this will wait after the last action to flush the data to the disk
     */
    long getSaveDelay();

    /**
     * @return the version of data stored in the database
     */
    int getVersion();

    /**
     * Sets the version of data stored in the database
     */
    void setVersion(int version);

    /**
     * Save the storage to disk, immediately.
     * <p/>
     * This will save ALL of the pending save actions to the file
     */
    void save();

    /**
     * Closes this storage system
     */
    void close();
}

View File

@ -1,833 +0,0 @@
/*
* Copyright 2014 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.util.storage;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.lang.ref.WeakReference;
import java.nio.channels.Channels;
import java.nio.channels.FileLock;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.concurrent.locks.ReentrantLock;
import org.slf4j.Logger;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
import dorkbox.os.OS;
import dorkbox.util.serialization.SerializationManager;
// a note on file locking between c and java
// http://panks-dev.blogspot.de/2008/04/linux-file-locks-java-and-others.html
// Also, file locks on linux are ADVISORY. if an app doesn't care about locks, then it can do stuff -- even if locked by another app
@SuppressWarnings("unused")
class StorageBase {
protected final Logger logger;
// File pointer to the data start pointer header.
private static final long VERSION_HEADER_LOCATION = 0;
// File pointer to the num records header.
private static final long NUM_RECORDS_HEADER_LOCATION = 4;
// File pointer to the data start pointer header.
private static final long DATA_START_HEADER_LOCATION = 8;
// Total length in bytes of the global database headers.
static final int FILE_HEADERS_REGION_LENGTH = 16;
// must be volatile
// The in-memory index (for efficiency, all of the record info is cached in memory).
private volatile HashMap<StorageKey, Metadata> memoryIndex;
private final Object singleWriterLock = new Object[0];
// Recommended for best performance while adhering to the "single writer principle". Must be static-final
private static final AtomicReferenceFieldUpdater<StorageBase, HashMap> memoryREF =
AtomicReferenceFieldUpdater.newUpdater(StorageBase.class, HashMap.class, "memoryIndex");
// determines how much the index will grow by
private final Float weight;
// The keys are weak! When they go, the map entry is removed!
private final ReentrantLock referenceLock = new ReentrantLock();
// file/raf that are used
private final File baseFile;
private final RandomAccessFile randomAccessFile;
/**
* Version number of database (4 bytes).
*/
private int databaseVersion = 0;
/**
* Number of records (4 bytes).
*/
private int numberOfRecords;
/**
* File pointer to the first byte of the record data (8 bytes).
*/
private long dataPosition;
// save references to these, so they don't have to be created/destroyed any time there is I/O
private final SerializationManager serializationManager;
private final Output output;
private final Input input;
// input/output write buffer size before flushing to/from the file
private static final int BUFFER_SIZE = 1024;
    /**
     * Creates or opens a new database file.
     *
     * @param filePath the backing file; parent directories are created as needed
     * @param serializationManager used to (de)serialize stored objects
     * @param logger optional logger; may be null
     *
     * @throws IOException if directories cannot be created
     * @throws IllegalArgumentException if the file header is inconsistent (likely corruption)
     */
    StorageBase(final File filePath, final SerializationManager serializationManager, final Logger logger) throws IOException {
        this.serializationManager = serializationManager;
        this.logger = logger;

        if (logger != null) {
            logger.info("Opening storage file: '{}'", filePath.getAbsolutePath());
        }

        this.baseFile = filePath;

        boolean newStorage = !filePath.exists();

        if (newStorage) {
            File parentFile = this.baseFile.getParentFile();
            if (parentFile != null && !parentFile.exists()) {
                if (!parentFile.mkdirs()) {
                    throw new IOException("Unable to create dirs for: " + filePath);
                }
            }
        }

        this.randomAccessFile = new RandomAccessFile(this.baseFile, "rw");

        // a file shorter than the global header region cannot hold valid data: (re)initialize it
        if (newStorage || this.randomAccessFile.length() <= FILE_HEADERS_REGION_LENGTH) {
            setVersion(this.randomAccessFile, 0);
            setRecordCount(this.randomAccessFile, 0);

            // pad the metadata with 21 records, so there is about 1k of padding before the data starts
            long indexPointer = Metadata.getMetaDataPointer(21);
            setDataStartPosition(indexPointer);
            // have to make sure we can read header info (even if it's blank)
            this.randomAccessFile.setLength(indexPointer);
        }
        else {
            // existing file: read the global headers (version, record count, data start)
            this.randomAccessFile.seek(VERSION_HEADER_LOCATION);
            this.databaseVersion = this.randomAccessFile.readInt();
            this.numberOfRecords = this.randomAccessFile.readInt();
            this.dataPosition = this.randomAccessFile.readLong();

            // the data region cannot start past EOF; if it does, the header is garbage
            if (this.randomAccessFile.length() < this.dataPosition) {
                if (logger != null) {
                    logger.error("Corrupted storage file!");
                }
                throw new IllegalArgumentException("Unable to parse header information from storage. Maybe it's corrupted?");
            }
        }

        //noinspection AutoBoxing
        if (logger != null) {
            logger.info("Storage version: {}", this.databaseVersion);
        }

        // If we want to use compression (no need really, since this file is small already),
        // then we have to make sure it's sync'd on flush AND have actually call outputStream.flush().
        final InputStream inputStream = Channels.newInputStream(randomAccessFile.getChannel());
        final OutputStream outputStream = Channels.newOutputStream(randomAccessFile.getChannel());

        // read/write 1024 bytes at a time
        output = new Output(outputStream, BUFFER_SIZE);
        input = new Input(inputStream, BUFFER_SIZE);

        this.weight = 0.5F;

        // synchronized is used here to ensure the "single writer principle", and make sure that ONLY one thread at a time can enter this
        // section. Because of this, we can have unlimited reader threads all going at the same time, without contention.
        synchronized (singleWriterLock) {
            this.memoryIndex = new HashMap<StorageKey, Metadata>(this.numberOfRecords);

            if (!newStorage) {
                Metadata meta;
                for (int index = 0; index < this.numberOfRecords; index++) {
                    meta = Metadata.readHeader(this.randomAccessFile, index);
                    if (meta == null) {
                        // because we guarantee that empty metadata are ALWAYS at the end of the section, if we get a null one, break!
                        break;
                    }
                    this.memoryIndex.put(meta.key, meta);
                }

                // self-heal: if fewer headers were readable than the header count claims, persist the real count
                if (this.memoryIndex.size() != (this.numberOfRecords)) {
                    setRecordCount(this.randomAccessFile, this.memoryIndex.size());
                    if (logger != null) {
                        logger.warn("Mismatch record count in storage, auto-correcting size.");
                    }
                }
            }
        }
    }
/**
* Returns the current number of records in the database.
*/
final
int size() {
// wrapper flushes first (protected by lock)
// not protected by lock
// access a snapshot of the memoryIndex (single-writer-principle)
HashMap memoryIndex = memoryREF.get(this);
return memoryIndex.size();
}
/**
* Checks if there is a record belonging to the given key.
*/
final
boolean contains(StorageKey key) {
// protected by lock
// access a snapshot of the memoryIndex (single-writer-principle)
HashMap memoryIndex = memoryREF.get(this);
return memoryIndex.containsKey(key);
}
/**
* @return an object for a specified key ONLY FROM THE REFERENCE CACHE
*/
final
<T> T getCached(StorageKey key) {
// protected by lock
// access a snapshot of the memoryIndex (single-writer-principle)
HashMap memoryIndex = memoryREF.get(this);
Metadata meta = (Metadata) memoryIndex.get(key);
if (meta == null) {
return null;
}
// now stuff it into our reference cache so subsequent lookups are fast!
//noinspection Duplicates
try {
this.referenceLock.lock();
// if we have registered it, get it!
WeakReference<Object> ref = meta.objectReferenceCache;
if (ref != null) {
@SuppressWarnings("unchecked")
T referenceObject = (T) ref.get();
return referenceObject;
}
} finally {
this.referenceLock.unlock();
}
return null;
}
/**
* @return an object for a specified key form referenceCache FIRST, then from DISK. NULL if it doesn't exist or there was an error.
*/
final
<T> T get(StorageKey key) {
// NOT protected by lock
// access a snapshot of the memoryIndex (single-writer-principle)
HashMap memoryIndex = memoryREF.get(this);
Metadata meta = (Metadata) memoryIndex.get(key);
if (meta == null) {
return null;
}
// now get it from our reference cache so subsequent lookups are fast!
//noinspection Duplicates
try {
this.referenceLock.lock();
// if we have registered it, get it!
WeakReference<Object> ref = meta.objectReferenceCache;
if (ref != null) {
@SuppressWarnings("unchecked")
T referenceObject = (T) ref.get();
return referenceObject;
}
} finally {
this.referenceLock.unlock();
}
try {
// System.err.println("--Reading data from: " + meta.dataPointer);
// else, we have to load it from disk
this.randomAccessFile.seek(meta.dataPointer);
T readRecordData = Metadata.readData(this.serializationManager, this.input);
if (readRecordData != null) {
// now stuff it into our reference cache for future lookups!
try {
this.referenceLock.lock();
meta.objectReferenceCache = new WeakReference<Object>(readRecordData);
} finally {
this.referenceLock.unlock();
}
}
return readRecordData;
} catch (Exception e) {
String message = e.getMessage();
int index = message.indexOf(OS.LINE_SEPARATOR);
if (index > -1) {
message = message.substring(0, index);
}
if (logger != null) {
logger.error("Error reading data from disk: {}", message);
}
else {
System.err.print("Error reading data from disk: " + message);
}
return null;
}
}
/**
 * Deletes a record from storage.
 *
 * @param key the key whose record should be removed
 *
 * @return true if the delete was successful. False if there were problems deleting the data
 *         (including the case where no record exists for the given key).
 */
final
boolean delete(StorageKey key) {
    // pending ops flushed (protected by lock)
    // synchronized is used here to ensure the "single writer principle", and make sure that ONLY one thread at a time can enter this
    // section. Because of this, we can have unlimited reader threads all going at the same time, without contention.
    synchronized (singleWriterLock) {
        Metadata delRec = this.memoryIndex.get(key);
        if (delRec == null) {
            // unknown key: previously this fell through to a NullPointerException on delRec.dataCapacity,
            // which escaped the IOException catch below. Deleting a missing record is simply "unsuccessful".
            return false;
        }

        try {
            deleteRecordData(delRec, delRec.dataCapacity);

            // delete the record index. Keep the index region compact: if the deleted record is not the
            // last header, move the LAST header into the vacated slot before shrinking the count.
            int currentNumRecords = this.memoryIndex.size();
            if (delRec.indexPosition != currentNumRecords - 1) {
                Metadata last = Metadata.readHeader(this.randomAccessFile, currentNumRecords - 1);
                assert last != null;
                last.moveRecord(this.randomAccessFile, delRec.indexPosition);
            }
            this.memoryIndex.remove(key);
            setRecordCount(this.randomAccessFile, currentNumRecords - 1);
            return true;
        } catch (IOException e) {
            if (this.logger != null) {
                this.logger.error("Error while deleting data from disk", e);
            } else {
                e.printStackTrace();
            }
            return false;
        }
    }
}
/**
 * Closes the database and file: syncs pending writes to disk, releases the file handle, and
 * clears the in-memory index.
 */
final
void close() {
    // pending ops flushed (protected by lock)
    // not protected by lock
    if (this.logger != null) {
        this.logger.info("Closing storage file: '{}'", this.baseFile.getAbsolutePath());
    }

    try {
        try {
            // force OS buffers to disk BEFORE releasing the handle
            this.randomAccessFile.getFD()
                                 .sync();
            this.input.close();
        } finally {
            // always release the file handle, even if the sync failed (the original leaked it in that case)
            this.randomAccessFile.close();
        }

        // synchronized is used here to ensure the "single writer principle", and make sure that ONLY one thread at a time can enter this
        // section. Because of this, we can have unlimited reader threads all going at the same time, without contention.
        synchronized (singleWriterLock) {
            this.memoryIndex.clear();
        }
    } catch (IOException e) {
        if (this.logger != null) {
            this.logger.error("Error while closing the file", e);
        } else {
            e.printStackTrace();
        }
    }
}
/**
 * Gets the size of the backing file, in bytes.
 *
 * @return the file length, or -1 if it could not be determined
 */
long getFileSize() {
    // protected by actionLock
    long length = -1L;
    try {
        length = this.randomAccessFile.length();
    } catch (IOException e) {
        if (this.logger == null) {
            e.printStackTrace();
        } else {
            this.logger.error("Error getting file size for {}", this.baseFile.getAbsolutePath(), e);
        }
    }
    return length;
}
/**
 * @return the file that backs this storage on disk
 */
final
File getFile() {
    return baseFile;
}
/**
 * Saves the given data to storage.
 * <p/>
 * Also will update existing data. If the new contents do not fit in the original space, then the update is handled by
 * deleting the old data and adding the new.
 * <p/>
 * Will also save the object in a cache (a weak reference, so the GC can still reclaim it).
 *
 * @param key    the (hashed) key identifying the record
 * @param object the value to serialize and persist; errors are logged, not thrown
 */
private
void save0(StorageKey key, Object object) {
    Metadata metaData;

    // synchronized is used here to ensure the "single writer principle", and make sure that ONLY one thread at a time can enter this
    // section. Because of this, we can have unlimited reader threads all going at the same time, without contention.
    synchronized (singleWriterLock) {
        metaData = this.memoryIndex.get(key);
        int currentRecordCount = this.numberOfRecords;

        if (metaData != null) {
            // now we have to UPDATE instead of add!
            try {
                if (currentRecordCount == 1) {
                    // if we are the ONLY one, then we can do things differently.
                    // just dump the data again to disk. No need to pre-serialize for sizing: the record
                    // region starts at dataPosition and can grow to the end of the file.
                    FileLock lock = this.randomAccessFile.getChannel()
                                                         .lock(this.dataPosition,
                                                               Long.MAX_VALUE - this.dataPosition,
                                                               false); // don't know how big it is, so max value it

                    this.randomAccessFile.seek(this.dataPosition); // this is the end of the file, we know this ahead-of-time
                    Metadata.writeData(this.serializationManager, object, this.output);
                    // have to re-specify the capacity and size, derived from how far the file grew
                    //noinspection NumericCastThatLosesPrecision
                    int sizeOfWrittenData = (int) (this.randomAccessFile.length() - this.dataPosition);

                    metaData.dataCapacity = sizeOfWrittenData;
                    metaData.dataCount = sizeOfWrittenData;
                    lock.release();
                }
                else {
                    // this is comparatively slow, since we serialize it first to get the size, then we put it in the file.
                    ByteArrayOutputStream dataStream = getDataAsByteArray(this.serializationManager, object);

                    int size = dataStream.size();
                    if (size > metaData.dataCapacity) {
                        // new data does not fit in the record's current slot: release the old slot and
                        // relocate the record to the end of the file
                        deleteRecordData(metaData, size);
                        // stuff this record to the end of the file, since it won't fit in it's current location
                        metaData.dataPointer = this.randomAccessFile.length();
                        // have to make sure that the CAPACITY of the new one is the SIZE of the new data!
                        // and since it is going to the END of the file, we do that.
                        metaData.dataCapacity = size;
                        metaData.dataCount = 0;
                    }

                    // TODO: should check to see if the data is different. IF SO, then we write, otherwise nothing!

                    metaData.writeDataRaw(dataStream, this.randomAccessFile);
                }

                metaData.writeDataInfo(this.randomAccessFile);
            } catch (IOException e) {
                if (this.logger != null) {
                    this.logger.error("Error while saving data to disk", e);
                } else {
                    e.printStackTrace();
                }
            }
        }
        else {
            // metadata == null... this is a brand-new record
            try {
                // set the number of records that this storage has
                setRecordCount(this.randomAccessFile, currentRecordCount + 1);

                // This will make sure that there is room to write a new record. This is zero indexed.
                // this will skip around if moves occur
                ensureIndexCapacity(this.randomAccessFile);

                // append record to end of file
                long length = this.randomAccessFile.length();

//                System.err.println("--Writing data to: " + length);

                metaData = new Metadata(key, currentRecordCount, length);
                metaData.writeMetaDataInfo(this.randomAccessFile);

                // add new entry to the index
                this.memoryIndex.put(key, metaData);

                // save out the data. Because we KNOW that we are writing this to the end of the file,
                // there are some tricks we can use.

                // don't know how big it is, so max value it
                FileLock lock = this.randomAccessFile.getChannel()
                                                     .lock(0, Long.MAX_VALUE, false);

                // this is the end of the file, we know this ahead-of-time
                this.randomAccessFile.seek(length);

                int total = Metadata.writeData(this.serializationManager, object, this.output);
                lock.release();

                metaData.dataCount = metaData.dataCapacity = total;
                // have to save it.
                metaData.writeDataInfo(this.randomAccessFile);
            } catch (IOException e) {
                if (this.logger != null) {
                    this.logger.error("Error while writing data to disk", e);
                } else {
                    e.printStackTrace();
                }
                // must bail out here: metaData may be null/half-written, and the cache update below
                // would dereference it
                return;
            }
        }
    }

    // put the object in the reference cache so we can read/get it later on
    metaData.objectReferenceCache = new WeakReference<Object>(object);
}
/**
 * Serializes the given object into an in-memory byte stream (used to learn the serialized size
 * before committing it to the file).
 *
 * @return the stream containing the fully serialized object
 */
private static
ByteArrayOutputStream getDataAsByteArray(SerializationManager serializationManager, Object data) throws IOException {
    ByteArrayOutputStream byteStream = new ByteArrayOutputStream();

    // serialize through a kryo Output, flushing in 1024-byte chunks
    Output kryoOutput = new Output(byteStream, 1024);
    serializationManager.writeFullClassAndObject(kryoOutput, data);
    kryoOutput.flush();

    byteStream.flush();
    byteStream.close();
    return byteStream;
}
/**
 * Persists every pending action.
 * <p/>
 * The map is thrown away after this invocation (GC can pick it up); only the LAST action recorded
 * for a given key is saved. Items to be "autosaved" are automatically injected into the map.
 *
 * @param actions key → latest value, for explicit saves (not necessarily "registered" for auto-save)
 */
void doActionThings(Map<StorageKey, Object> actions) {
    for (Entry<StorageKey, Object> action : actions.entrySet()) {
        save0(action.getKey(), action.getValue());
    }
}
/////////////////////
/////////////////////
// private/index only methods
/////////////////////
/////////////////////
/**
 * "intelligent" grow strategy for the index region.
 * <p/>
 * Rather than incrementing the allocated record-header count one at a time, grow it by a
 * proportional weight (e.g. a weight of .5 grows the header count by 50%) plus one.
 *
 * @return the new total number of record headers to allocate
 */
private
int getWeightedNewRecordCount(int numberOfRecords) {
    //noinspection AutoUnboxing,NumericCastThatLosesPrecision
    int growth = (int) (numberOfRecords * this.weight);
    return numberOfRecords + growth + 1;
}
// protected by singleWriterLock
/**
 * Releases the data region owned by a record. Three cases:
 * <ul>
 *   <li>record is LAST in the file → truncate the file;</li>
 *   <li>record is FIRST in the data section → re-balance the index/data boundary;</li>
 *   <li>otherwise → merge the freed space into the previous record's capacity.</li>
 * </ul>
 *
 * @param deletedRecord   the record whose data region is being released
 * @param sizeOfDataToAdd size in bytes of data about to be written (used when deciding how far
 *                        the data-start boundary can move)
 */
private
void deleteRecordData(Metadata deletedRecord, int sizeOfDataToAdd) throws IOException {
    if (this.randomAccessFile.length() == deletedRecord.dataPointer + deletedRecord.dataCapacity) {
        // shrink file since this is the last record in the file
        FileLock lock = this.randomAccessFile.getChannel()
                                             .lock(deletedRecord.dataPointer, Long.MAX_VALUE - deletedRecord.dataPointer, false);
        this.randomAccessFile.setLength(deletedRecord.dataPointer);
        lock.release();
    }
    else {
        // we MIGHT be the FIRST record
        Metadata first = index_getMetaDataFromData(this.dataPosition);
        if (first == deletedRecord) {
            // the record to delete is the FIRST (of many) in the file.
            // the FASTEST way to delete is to grow the number of allowed records!
            // Another option is to move the #2 data to the first data, but then there is the same gap after #2.
            int numberOfRecords = this.numberOfRecords;

            // "intelligent" move strategy.
            int newNumberOfRecords = getWeightedNewRecordCount(numberOfRecords);
            long endIndexPointer = Metadata.getMetaDataPointer(newNumberOfRecords);

            long endOfDataPointer = deletedRecord.dataPointer + deletedRecord.dataCapacity;
            long newEndOfDataPointer = endOfDataPointer - sizeOfDataToAdd;

            if (endIndexPointer < this.dataPosition && endIndexPointer <= newEndOfDataPointer) {
                // one option is to shrink the RECORD section to fit the new data
                setDataStartPosition(newEndOfDataPointer);
            }
            else {
                // option two is to grow the RECORD section, and put the data at the end of the file
                setDataStartPosition(endOfDataPointer);
            }
        }
        else {
            // not last, not first: fold the freed bytes into whichever record sits directly before us
            Metadata previous = index_getMetaDataFromData(deletedRecord.dataPointer - 1);
            if (previous != null) {
                // append space of deleted record onto previous record
                previous.dataCapacity += deletedRecord.dataCapacity;
                previous.writeDataInfo(this.randomAccessFile);
            }
            else {
                // because there is no "previous", that means we MIGHT be the FIRST record
                // well, we're not the first record. which one is RIGHT before us?
                // it should be "previous", so something messed up
                if (this.logger != null) {
                    this.logger.error("Trying to delete an object, and it's in a weird state");
                } else {
                    System.err.println("Trying to delete an object, and it's in a weird state");
                }
            }
        }
    }
}
/**
 * Writes the database-version header to the file.
 * (The original javadoc was a copy-paste of setRecordCount's — it writes the VERSION, not the record count.)
 *
 * @param file          the file to write to; the region lock is taken on THIS file's channel
 *                      (previously it was always taken on this.randomAccessFile's channel, which
 *                      is the wrong lock whenever a different file is passed in)
 * @param versionNumber the version to record in the header (also cached in memory)
 */
private
void setVersion(RandomAccessFile file, int versionNumber) throws IOException {
    this.databaseVersion = versionNumber;

    FileLock lock = file.getChannel()
                        .lock(VERSION_HEADER_LOCATION, 4, false);
    try {
        file.seek(VERSION_HEADER_LOCATION);
        file.writeInt(versionNumber);
    } finally {
        // release even if the write fails, so the region is never left locked
        lock.release();
    }
}
/**
 * Writes the number-of-records header to the file (no-op if the count is unchanged).
 *
 * @param file            the file to write to; the region lock is taken on THIS file's channel
 *                        (previously it was taken on this.randomAccessFile's channel, which is the
 *                        wrong lock whenever a different file is passed in)
 * @param numberOfRecords the new record count (also cached in memory)
 */
private
void setRecordCount(RandomAccessFile file, int numberOfRecords) throws IOException {
    if (this.numberOfRecords != numberOfRecords) {
        this.numberOfRecords = numberOfRecords;

//            System.err.println("Set recordCount: " + numberOfRecords);

        FileLock lock = file.getChannel()
                            .lock(NUM_RECORDS_HEADER_LOCATION, 4, false);
        try {
            file.seek(NUM_RECORDS_HEADER_LOCATION);
            file.writeInt(numberOfRecords);
        } finally {
            // release even if the write fails, so the region is never left locked
            lock.release();
        }
    }
}
/**
 * Writes the data-start-position header to the file and updates the in-memory copy.
 *
 * @param dataPositionPointer the file offset where the data region now begins
 */
private
void setDataStartPosition(long dataPositionPointer) throws IOException {
    FileLock regionLock = this.randomAccessFile.getChannel()
                                               .lock(DATA_START_HEADER_LOCATION, 8, false);

    // keep the cached position in sync with what we persist
    this.dataPosition = dataPositionPointer;

    this.randomAccessFile.seek(DATA_START_HEADER_LOCATION);
    this.randomAccessFile.writeLong(dataPositionPointer);
    regionLock.release();
}
/**
 * @return the database version number cached from the file header
 */
int getVersion() {
    return databaseVersion;
}
/**
 * Sets the database version number, persisting it to the file header.
 * Errors are logged (or printed), never thrown.
 */
void setVersion(int versionNumber) {
    try {
        setVersion(this.randomAccessFile, versionNumber);
    } catch (IOException e) {
        if (this.logger == null) {
            e.printStackTrace();
        } else {
            this.logger.error("Unable to set the version number", e);
        }
    }
}
/**
 * Returns the record to which the target file pointer belongs - meaning the specified location in the file is part
 * of the record data of the RecordHeader which is returned. Returns null if the location is not part of a record.
 * (O(n) memory accesses over the index)
 *
 * @param targetFp an absolute file offset to look up
 *
 * @return the record whose data region [dataPointer, dataPointer + dataCapacity) contains targetFp, or null
 */
private
Metadata index_getMetaDataFromData(long targetFp) {
    // access a snapshot of the memoryIndex (single-writer-principle)
    HashMap memoryIndex = memoryREF.get(this);

    // enhanced-for replaces the manual raw Iterator loop (same traversal, idiomatic form)
    for (Object value : memoryIndex.values()) {
        Metadata next = (Metadata) value;
        if (targetFp >= next.dataPointer && targetFp < next.dataPointer + next.dataCapacity) {
            return next;
        }
    }
    return null;
}
/**
 * Ensure index capacity. This operation makes sure the INDEX REGION is large enough to accommodate additional entries,
 * growing it by the weighted strategy and relocating any record data that sits in the way to the end of the file.
 *
 * @param file the storage file being grown / rearranged
 */
private
void ensureIndexCapacity(RandomAccessFile file) throws IOException {
    // because we are zero indexed, this is ALSO the index where the record will START
    int numberOfRecords = this.numberOfRecords;

    // +1 because this is where that index will END (the start of the NEXT one)
    long endIndexPointer = Metadata.getMetaDataPointer(numberOfRecords + 1);

    // just set the data position to the end of the file, since we don't have any data yet.
    if (endIndexPointer > file.length() && numberOfRecords == 0) {
        file.setLength(endIndexPointer);
        setDataStartPosition(endIndexPointer);
        return;
    }

    // now we have to check, is there room for just 1 more entry?
    long readDataPosition = this.dataPosition;
    if (endIndexPointer < readDataPosition) {
        // we have room for this entry.
        return;
    }

    // otherwise, we have to grow our index.
    Metadata first;

    // "intelligent" move strategy.
    int newNumberOfRecords = getWeightedNewRecordCount(numberOfRecords);
    endIndexPointer = Metadata.getMetaDataPointer(newNumberOfRecords);

    // sometimes the endIndexPointer is in the middle of data, so we cannot move a record to where
    // data already exists, we have to move it to the end. Since we GUARANTEE that there is never "free space" at the
    // end of a file, this is ok
    if (endIndexPointer > file.length()) {
        // make sure we adjust the file size
        file.setLength(endIndexPointer);
    }
    else {
        endIndexPointer = file.length();
    }

    // we know that the start of the NEW data position has to be here.
    setDataStartPosition(endIndexPointer);

    long writeDataPosition = endIndexPointer;

    // evacuate every record whose data lies inside the newly claimed index region, appending each
    // one at the end of the file in turn.
    // if we only have ONE record left, and we move it to the end, then no reason to keep looking for records.
    while (endIndexPointer > readDataPosition && numberOfRecords > 0) {
        // this is the FIRST record that is in our data section
        first = index_getMetaDataFromData(readDataPosition);
        if (first == null) {
            //nothing is here, so keep checking
            readDataPosition += Metadata.INDEX_ENTRY_LENGTH;
            continue;
        }

//            System.err.println("\nMoving record: " + first.indexPosition + " -> " + writeDataPosition);
        first.moveData(file, writeDataPosition);

        int dataCapacity = first.dataCapacity;
        readDataPosition += dataCapacity;
        writeDataPosition += dataCapacity;
        numberOfRecords--;
    }
}
}

View File

@ -1,61 +0,0 @@
package dorkbox.util.storage;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.KryoException;
import com.esotericsoftware.kryo.Serializer;
/**
 * Common configuration contract for building a {@link Storage} (disk- or memory-backed).
 * All {@code register} methods mirror Kryo's registration semantics and return {@code this}
 * for chaining.
 */
public
interface StorageBuilder {
    /**
     * Builds the storage using the specified configuration
     */
    Storage build();

    /**
     * Registers the class using the lowest, next available integer ID and the {@link Kryo#getDefaultSerializer(Class) default serializer}.
     * If the class is already registered, the existing entry is updated with the new serializer.
     * <p>
     * Registering a primitive also affects the corresponding primitive wrapper.
     * <p>
     * Because the ID assigned is affected by the IDs registered before it, the order classes are registered is important when using this
     * method. The order must be the same at deserialization as it was for serialization.
     */
    <T> StorageBuilder register(Class<T> clazz);

    /**
     * Registers the class using the specified ID. If the ID is already in use by the same type, the old entry is overwritten. If the ID
     * is already in use by a different type, a {@link KryoException} is thrown.
     * <p>
     * Registering a primitive also affects the corresponding primitive wrapper.
     * <p>
     * IDs must be the same at deserialization as they were for serialization.
     *
     * @param id Must be >= 0. Smaller IDs are serialized more efficiently. IDs 0-8 are used by default for primitive types and String, but
     *           these IDs can be repurposed.
     */
    <T> StorageBuilder register(Class<T> clazz, int id);

    /**
     * Registers the class using the lowest, next available integer ID and the specified serializer. If the class is already registered,
     * the existing entry is updated with the new serializer.
     * <p>
     * Registering a primitive also affects the corresponding primitive wrapper.
     * <p>
     * Because the ID assigned is affected by the IDs registered before it, the order classes are registered is important when using this
     * method. The order must be the same at deserialization as it was for serialization.
     */
    <T> StorageBuilder register(Class<T> clazz, Serializer<T> serializer);

    /**
     * Registers the class using the specified ID and serializer. If the ID is already in use by the same type, the old entry is
     * overwritten. If the ID is already in use by a different type, a {@link KryoException} is thrown.
     * <p>
     * Registering a primitive also affects the corresponding primitive wrapper.
     * <p>
     * IDs must be the same at deserialization as they were for serialization.
     *
     * @param id Must be >= 0. Smaller IDs are serialized more efficiently. IDs 0-8 are used by default for primitive types and String, but
     *           these IDs can be repurposed.
     */
    <T> StorageBuilder register(Class<T> clazz, Serializer<T> serializer, int id);
}

View File

@ -1,35 +0,0 @@
/*
* Copyright 2014 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.util.storage;
import dorkbox.util.HashUtil;
import dorkbox.util.bytes.ByteArrayWrapper;
/**
 * Make a ByteArrayWrapper that is really a SHA256 hash of the bytes.
 * <p>
 * Note the asymmetry: the String constructor hashes its input, while the byte[] constructor
 * uses the given bytes verbatim (callers must pass an already-hashed key if they want parity).
 */
public
class StorageKey extends ByteArrayWrapper {
    /**
     * Creates a key whose bytes are the SHA-256 hash of the given string.
     * <p>
     * The {@code false} flag is forwarded to ByteArrayWrapper — presumably "do not copy the
     * array"; NOTE(review): confirm against ByteArrayWrapper's constructor contract.
     */
    public
    StorageKey(String key) {
        super(HashUtil.getSha256(key), false);
    }

    /**
     * Creates a key directly from the given bytes (NOT hashed here).
     */
    public
    StorageKey(byte[] key) {
        super(key, false);
    }
}

View File

@ -1,344 +0,0 @@
/*
* Copyright 2014 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.util.storage;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.helpers.NOPLogger;
import com.esotericsoftware.kryo.Serializer;
import dorkbox.os.OS;
import dorkbox.util.FileUtil;
import dorkbox.util.serialization.SerializationManager;
/**
 * Factory and registry for {@link Storage} instances.
 * <p>
 * Every open storage is tracked by its backing file so the same file is never opened twice
 * concurrently; a JVM shutdown hook flushes and closes everything on exit.
 */
public
class StorageSystem {
    // every open storage, keyed by its normalized backing file. All access is guarded by
    // synchronized (storages).
    private static final Map<File, Storage> storages = new HashMap<File, Storage>(1);

    // Make sure that the timer is run on shutdown. A HARD shutdown will just POW! kill it, a "nice" shutdown will run the hook
    private static final Thread shutdownHook = new Thread(new Runnable() {
        @Override
        public
        void run() {
            StorageSystem.shutdown();
        }
    });

    static {
        // add a shutdown hook to make sure that we properly flush/shutdown storage.
        Runtime.getRuntime()
               .addShutdownHook(shutdownHook);
    }

    /**
     * Creates a persistent, on-disk storage system. Writes to disk are queued, so it is recommended to NOT edit/change an object after
     * it has been put into storage, or whenever it does changes, make sure to put it back into storage (to update the saved record)
     */
    public static
    DiskBuilder Disk() {
        return new DiskBuilder();
    }

    /**
     * Creates an in-memory only storage system
     */
    public static
    MemoryBuilder Memory() {
        return new MemoryBuilder();
    }

    /**
     * Closes the specified storage system based on the file used. Disk storages are
     * reference-counted: the file is only fully closed (and forgotten) when the last
     * reference is released.
     */
    public static
    void close(final File file) {
        synchronized (storages) {
            Storage storage = storages.get(file);
            if (storage != null) {
                if (storage instanceof DiskStorage) {
                    final DiskStorage diskStorage = (DiskStorage) storage;
                    boolean isLastOne = diskStorage.decrementReference();
                    if (isLastOne) {
                        diskStorage.closeFully();
                        storages.remove(file);
                    }
                }
            }
        }
    }

    /**
     * Closes the specified storage system (delegates to {@link #close(File)} via its backing file).
     */
    public static
    void close(final Storage storage) {
        synchronized (storages) {
            File file = storage.getFile();
            close(file);
        }
    }

    /**
     * Saves and closes all open storage systems. Called automatically by the shutdown hook.
     */
    public static
    void shutdown() {
        synchronized (storages) {
            Collection<Storage> values = storages.values();
            for (Storage storage : values) {
                if (storage instanceof DiskStorage) {
                    final DiskStorage diskStorage = (DiskStorage) storage;
                    // drain every outstanding reference before fully closing
                    //noinspection StatementWithEmptyBody
                    while (!diskStorage.decrementReference()) {
                    }
                    diskStorage.closeFully();
                }
            }
            storages.clear();
        }
    }

    /**
     * Closes (if in use) and deletes the specified storage file.
     * <p>
     * The file is checked to see if it is in use by the storage system first, and closes if so.
     */
    public static
    void delete(File file) {
        synchronized (storages) {
            Storage remove = storages.remove(file);
            if (remove instanceof DiskStorage) {
                ((DiskStorage) remove).closeFully();
            }
            //noinspection ResultOfMethodCallIgnored
            file.delete();
        }
    }

    /**
     * Closes (if in use) and deletes the specified storage.
     */
    public static
    void delete(Storage storage) {
        File file = storage.getFile();
        delete(file);
    }


    /**
     * Creates a persistent, on-disk storage system. Writes to disk are queued, so it is recommended to NOT edit/change an object after
     * it has been put into storage, or whenever it does changes, make sure to put it back into storage (to update the saved record)
     */
    @SuppressWarnings("unused")
    public static
    class DiskBuilder implements StorageBuilder {
        public File file;
        public SerializationManager<?> serializationManager = new DefaultStorageSerializationManager(); // default
        public boolean readOnly = false;
        public Logger logger = null;
        public long saveDelayInMilliseconds = 3000L; // default

        /**
         * Specify the file to write to on disk when saving objects
         */
        public
        DiskBuilder file(File file) {
            this.file = FileUtil.normalize(file);
            return this;
        }

        /**
         * Specify the file to write to on disk when saving objects
         */
        public
        DiskBuilder file(String file) {
            this.file = FileUtil.normalize(file);
            return this;
        }

        /**
         * Specify the serialization manager to use. This is what serializes the files (which are then saved to disk)
         */
        public
        DiskBuilder serializer(SerializationManager<?> serializationManager) {
            this.serializationManager = serializationManager;
            return this;
        }

        /**
         * Mark this storage system as read only
         */
        public
        DiskBuilder readOnly() {
            this.readOnly = true;
            return this;
        }

        /**
         * Specify the delay, in milliseconds, before queued writes are flushed to disk.
         * (The previous javadoc — "Mark this storage system as read only" — was a copy-paste error.)
         */
        public
        DiskBuilder setSaveDelay(long saveDelayInMilliseconds) {
            this.saveDelayInMilliseconds = saveDelayInMilliseconds;
            return this;
        }

        /**
         * Assigns a logger to use for the storage system. If null, then only errors will be logged to the error console.
         */
        public
        DiskBuilder logger(final Logger logger) {
            this.logger = logger;
            return this;
        }

        /**
         * Assigns a No Operation (NOP) logger which will ignore everything. This is not recommended for normal use, as it will also
         * suppress serialization errors.
         */
        public
        DiskBuilder noLogger() {
            this.logger = NOPLogger.NOP_LOGGER;
            return this;
        }

        @Override
        public
        <T> StorageBuilder register(final Class<T> clazz) {
            this.serializationManager.register(clazz);
            return this;
        }

        @Override
        public
        <T> StorageBuilder register(final Class<T> clazz, final int id) {
            this.serializationManager.register(clazz, id);
            return this;
        }

        @Override
        public
        <T> StorageBuilder register(final Class<T> clazz, final Serializer<T> serializer) {
            this.serializationManager.register(clazz, serializer);
            return this;
        }

        @Override
        public
        <T> StorageBuilder register(final Class<T> clazz, final Serializer<T> serializer, final int id) {
            this.serializationManager.register(clazz, serializer, id);
            return this;
        }

        /**
         * Makes the storage system. If a storage is already open for the same file, its reference
         * count is increased and the SAME instance is returned.
         *
         * @return the storage, or null if the storage file could not be opened (the error is logged)
         */
        @Override
        public
        Storage build() {
            if (this.file == null) {
                throw new IllegalArgumentException("file cannot be null!");
            }

            // if we load from a NEW storage at the same location as an ALREADY EXISTING storage,
            // without saving the existing storage first --- whoops!
            synchronized (storages) {
                Storage storage = storages.get(this.file);

                if (storage != null) {
                    if (storage instanceof DiskStorage) {
                        boolean waiting = storage.hasWriteWaiting();
                        // we want this storage to be in a fresh state
                        if (waiting) {
                            storage.save();
                        }
                        ((DiskStorage) storage).increaseReference();
                    }
                    else {
                        throw new RuntimeException("Unable to change storage types for: " + this.file);
                    }
                }
                else {
                    try {
                        storage = new DiskStorage(this.file, this.serializationManager, this.readOnly, this.saveDelayInMilliseconds, this.logger);
                        storages.put(this.file, storage);
                    } catch (IOException e) {
                        // getMessage() may legitimately be null; guard before trimming to the first line
                        // (previously this could throw a NullPointerException while reporting the error)
                        String message = e.getMessage();
                        if (message != null) {
                            int index = message.indexOf(OS.LINE_SEPARATOR);
                            if (index > -1) {
                                message = message.substring(0, index);
                            }
                        }

                        if (logger != null) {
                            logger.error("Unable to open storage file at {}. {}", this.file, message);
                        }
                        else {
                            // println (not print): keep error output line-terminated like everywhere else
                            System.err.println("Unable to open storage file at " + this.file + ". " + message);
                        }
                    }
                }

                return storage;
            }
        }
    }


    /**
     * Creates an in-memory only storage system. This storage system DOES NOT care about serializing data, so `register` has no effect.
     */
    public static
    class MemoryBuilder implements StorageBuilder {
        /**
         * Builds the storage system
         */
        @Override
        public
        Storage build() {
            return new MemoryStorage();
        }

        // registration is meaningless for in-memory storage: nothing is ever serialized
        @Override
        public
        <T> StorageBuilder register(final Class<T> clazz) {
            return this;
        }

        @Override
        public
        <T> StorageBuilder register(final Class<T> clazz, final int id) {
            return this;
        }

        @Override
        public
        <T> StorageBuilder register(final Class<T> clazz, final Serializer<T> serializer) {
            return this;
        }

        @Override
        public
        <T> StorageBuilder register(final Class<T> clazz, final Serializer<T> serializer, final int id) {
            return this;
        }
    }
}