Initial commit

master Version_1.0
Robinson 2021-05-02 11:35:58 +02:00
commit 60c6f378e6
27 changed files with 3246 additions and 0 deletions

121
.gitignore vendored Normal file
View File

@@ -0,0 +1,121 @@
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff:
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/dictionaries
.idea/**/codeStyles/
.idea/**/codeStyleSettings.xml
# Sensitive or high-churn files:
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.xml
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/shelf/
# Gradle:
.idea/**/gradle.xml
.idea/**/libraries
# CMake
cmake-build-debug/
# Mongo Explorer plugin:
.idea/**/mongoSettings.xml
## File-based project format:
*.iws
## Plugin-specific files:
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
######################
# End JetBrains IDEs #
######################
# From https://github.com/github/gitignore/blob/master/Gradle.gitignore
.gradle
/build/
# Ignore Gradle GUI config
gradle-app.setting
# Avoid ignoring Gradle wrapper jar file (.jar files are usually ignored)
!gradle-wrapper.jar
!gradle-wrapper.properties
# Cache of project
.gradletasknamecache
# From https://github.com/github/gitignore/blob/master/Java.gitignore
*.class
# Mobile Tools for Java (J2ME)
.mtj.tmp/
# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
hs_err_pid*
*.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
##########################################################
# Specific to this module
# iml files are generated by intellij/gradle now
**/*.iml

5
LICENSE Normal file
View File

@@ -0,0 +1,5 @@
- Storage - Storage system for Java
[The Apache Software License, Version 2.0]
https://git.dorkbox.com/dorkbox/Storage
Copyright 2021
Dorkbox LLC

218
LICENSE.Apachev2 Normal file
View File

@@ -0,0 +1,218 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

26
LICENSE.BSD3 Normal file
View File

@@ -0,0 +1,26 @@
BSD License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <ORGANIZATION> nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <ORGANIZATION> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

21
LICENSE.MIT Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

47
LICENSE.OLDAPv2.8 Normal file
View File

@@ -0,0 +1,47 @@
The OpenLDAP Public License
Version 2.8, 17 August 2003
Redistribution and use of this software and associated documentation
("Software"), with or without modification, are permitted provided
that the following conditions are met:
1. Redistributions in source form must retain copyright statements
and notices,
2. Redistributions in binary form must reproduce applicable copyright
statements and notices, this list of conditions, and the following
disclaimer in the documentation and/or other materials provided
with the distribution, and
3. Redistributions must contain a verbatim copy of this document.
The OpenLDAP Foundation may revise this license from time to time.
Each revision is distinguished by a version number. You may use
this Software under terms of this license revision or under the
terms of any subsequent revision of the license.
THIS SOFTWARE IS PROVIDED BY THE OPENLDAP FOUNDATION AND ITS
CONTRIBUTORS ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE OPENLDAP FOUNDATION, ITS CONTRIBUTORS, OR THE AUTHOR(S)
OR OWNER(S) OF THE SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The names of the authors and copyright holders must not be used in
advertising or otherwise to promote the sale, use or other dealing
in this Software without specific, written prior permission. Title
to copyright in this Software shall at all times remain with copyright
holders.
OpenLDAP is a registered trademark of the OpenLDAP Foundation.
Copyright 1999-2003 The OpenLDAP Foundation, Redwood City,
California, USA. All Rights Reserved. Permission to copy and
distribute verbatim copies of this document is granted.

39
README.md Normal file
View File

@@ -0,0 +1,39 @@
Storage - Storage system for Java
###### [![Dorkbox](https://badge.dorkbox.com/dorkbox.svg "Dorkbox")](https://git.dorkbox.com/dorkbox/Storage) [![Github](https://badge.dorkbox.com/github.svg "Github")](https://github.com/dorkbox/Storage) [![Gitlab](https://badge.dorkbox.com/gitlab.svg "Gitlab")](https://gitlab.com/dorkbox/Storage)
The files here are copyrighted by many different authors, usually under the Apache 2.0, MIT, or BSD licenses.
Please see the header of each file for the specific license that applies to it; full license details are in the LICENSE file.
Maven Info
---------
```
<dependencies>
...
<dependency>
<groupId>com.dorkbox</groupId>
<artifactId>Storage</artifactId>
<version>1.0</version>
</dependency>
</dependencies>
```
Gradle Info
---------
````
dependencies {
...
compile "com.dorkbox:SwingActiveRender:1.0"
}
````
License
---------
This project is © 2021 dorkbox llc, and is distributed under the terms of the Apache 2.0 License. See the LICENSE file for further
details.

129
build.gradle.kts Normal file
View File

@@ -0,0 +1,129 @@
/*
* Copyright 2021 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.time.Instant
///////////////////////////////
////// PUBLISH TO SONATYPE / MAVEN CENTRAL
////// TESTING : (to local maven repo) <'publish and release' - 'publishToMavenLocal'>
////// RELEASE : (to sonatype/maven central), <'publish and release' - 'publishToSonatypeAndRelease'>
///////////////////////////////
gradle.startParameter.showStacktrace = ShowStacktrace.ALWAYS // always show the stacktrace!
gradle.startParameter.warningMode = WarningMode.All
plugins {
java
id("com.dorkbox.GradleUtils") version "1.12"
id("com.dorkbox.Licensing") version "2.5.2"
id("com.dorkbox.VersionUpdate") version "2.1"
id("com.dorkbox.GradlePublish") version "1.8"
}
object Extras {
// set for the project
const val description = "Storage system for Java"
const val group = "com.dorkbox"
const val version = "1.0"
// set as project.ext
const val name = "Storage"
const val id = "Storage"
const val vendor = "Dorkbox LLC"
const val vendorUrl = "https://dorkbox.com"
const val url = "https://git.dorkbox.com/dorkbox/Storage"
val buildDate = Instant.now().toString()
}
///////////////////////////////
///// assign 'Extras'
///////////////////////////////
GradleUtils.load("$projectDir/../../gradle.properties", Extras)
GradleUtils.fixIntellijPaths()
GradleUtils.defaultResolutionStrategy()
GradleUtils.compileConfiguration(JavaVersion.VERSION_1_6)
licensing {
license(License.APACHE_2) {
description(Extras.description)
author(Extras.vendor)
url(Extras.url)
}
}
sourceSets {
main {
java {
setSrcDirs(listOf("src"))
// want to include java files for the source. 'setSrcDirs' resets includes...
include("**/*.java")
}
}
}
repositories {
mavenLocal() // this must be first!
jcenter()
}
tasks.jar.get().apply {
manifest {
// https://docs.oracle.com/javase/tutorial/deployment/jar/packageman.html
attributes["Name"] = Extras.name
attributes["Specification-Title"] = Extras.name
attributes["Specification-Version"] = Extras.version
attributes["Specification-Vendor"] = Extras.vendor
attributes["Implementation-Title"] = "${Extras.group}.${Extras.id}"
attributes["Implementation-Version"] = Extras.buildDate
attributes["Implementation-Vendor"] = Extras.vendor
attributes["Automatic-Module-Name"] = Extras.id
}
}
dependencies {
// implementation("com.dorkbox:Utilities:1.5.1")
}
publishToSonatype {
groupId = Extras.group
artifactId = Extras.id
version = Extras.version
name = Extras.name
description = Extras.description
url = Extras.url
vendor = Extras.vendor
vendorUrl = Extras.vendorUrl
issueManagement {
url = "${Extras.url}/issues"
nickname = "Gitea Issues"
}
developer {
id = "dorkbox"
name = Extras.vendor
email = "email@dorkbox.com"
}
}

BIN
gradle/wrapper/gradle-wrapper.jar vendored Normal file

Binary file not shown.

View File

@@ -0,0 +1,5 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-6.5.1-all.zip
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists

185
gradlew vendored Normal file
View File

@@ -0,0 +1,185 @@
#!/usr/bin/env sh
#
# Copyright 2015 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
##
## Gradle start up script for UN*X
##
##############################################################################
# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`"/$link"
fi
done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null
APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"
warn () {
echo "$*"
}
die () {
echo
echo "$*"
echo
exit 1
}
# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
nonstop=false
case "`uname`" in
CYGWIN* )
cygwin=true
;;
Darwin* )
darwin=true
;;
MINGW* )
msys=true
;;
NONSTOP* )
nonstop=true
;;
esac
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
if [ ! -x "$JAVACMD" ] ; then
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
else
JAVACMD="java"
which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
MAX_FD_LIMIT=`ulimit -H -n`
if [ $? -eq 0 ] ; then
if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
MAX_FD="$MAX_FD_LIMIT"
fi
ulimit -n $MAX_FD
if [ $? -ne 0 ] ; then
warn "Could not set maximum file descriptor limit: $MAX_FD"
fi
else
warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
fi
fi
# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi
# For Cygwin or MSYS, switch paths to Windows format before running java
if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then
APP_HOME=`cygpath --path --mixed "$APP_HOME"`
CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
JAVACMD=`cygpath --unix "$JAVACMD"`
# We build the pattern for arguments to be converted via cygpath
ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
SEP=""
for dir in $ROOTDIRSRAW ; do
ROOTDIRS="$ROOTDIRS$SEP$dir"
SEP="|"
done
OURCYGPATTERN="(^($ROOTDIRS))"
# Add a user-defined pattern to the cygpath arguments
if [ "$GRADLE_CYGPATTERN" != "" ] ; then
OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
fi
# Now convert the arguments - kludge to limit ourselves to /bin/sh
i=0
for arg in "$@" ; do
CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
else
eval `echo args$i`="\"$arg\""
fi
i=`expr $i + 1`
done
case $i in
0) set -- ;;
1) set -- "$args0" ;;
2) set -- "$args0" "$args1" ;;
3) set -- "$args0" "$args1" "$args2" ;;
4) set -- "$args0" "$args1" "$args2" "$args3" ;;
5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
esac
fi
# Escape application args
save () {
for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
echo " "
}
APP_ARGS=`save "$@"`
# Collect all arguments for the java command, following the shell quoting and substitution rules
eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
exec "$JAVACMD" "$@"

104
gradlew.bat vendored Normal file
View File

@@ -0,0 +1,104 @@
@rem
@rem Copyright 2015 the original author or authors.
@rem
@rem Licensed under the Apache License, Version 2.0 (the "License");
@rem you may not use this file except in compliance with the License.
@rem You may obtain a copy of the License at
@rem
@rem https://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Resolve any "." and ".." in APP_HOME to make it shorter.
for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto init
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:init
@rem Get command-line arguments, handling Windows variants
if not "%OS%" == "Windows_NT" goto win9xME_args
:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2
:win9xME_args_slurp
if "x%~1" == "x" goto execute
set CMD_LINE_ARGS=%*
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega

15
settings.gradle.kts Normal file
View File

@@ -0,0 +1,15 @@
/*
* Copyright 2021 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

View File

@@ -0,0 +1,2 @@
package dorkbox.storage

View File

@@ -0,0 +1,94 @@
/*
* Copyright 2020 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.network.storage.types
import dorkbox.network.storage.GenericStore
import dorkbox.network.storage.SettingsStore
import dorkbox.network.storage.StorageType
import mu.KLogger
import net.openhft.chronicle.map.ChronicleMap
import java.io.File
import java.net.InetAddress
/**
* Chronicle Map is a super-fast, in-memory, non-blocking, key-value store
*
* https://github.com/OpenHFT/Chronicle-Map
*/
class ChronicleMapStore(val dbFile: File, val logger: KLogger): GenericStore {
companion object {
fun type(dbFile: String) : StorageType {
return type(File(dbFile))
}
fun type(dbFile: File) = object : StorageType {
override fun create(logger: KLogger): SettingsStore {
return SettingsStore(logger, ChronicleMapStore(dbFile.absoluteFile, logger))
}
}
}
private val map = ChronicleMap.of(ByteArray::class.java, ByteArray::class.java)
.name("machine-keys")
.entries(1_000_000)
.constantValueSizeBySample(ByteArray(32))
.averageKeySize(16.0)
.createPersistedTo(dbFile)
// byte 0 is SALT
private val saltBuffer = ByteArray(1) {0}
// byte 1 is private key
private val privateKeyBuffer = ByteArray(1) {1}
init {
logger.info("ChronicleMap storage initialized at: '$dbFile'")
}
private fun getBytes(key: Any): ByteArray {
return when (key) {
is InetAddress -> key.address
SettingsStore.saltKey -> saltBuffer
SettingsStore.privateKey -> privateKeyBuffer
else -> throw IllegalArgumentException("Unable to manage property: $key")
}
}
override fun get(key: Any): ByteArray? {
return map[getBytes(key)]
}
/**
* Setting to NULL removes it
*/
@Suppress("DuplicatedCode")
override fun set(key: Any, bytes: ByteArray?) {
val keyBytes = getBytes(key)
if (bytes == null) {
map.remove(keyBytes)
}
else {
map[keyBytes] = bytes
}
}
override fun close() {
map.close()
}
}
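The store above exposes a small byte-array API (get, set, close), keyed either by an InetAddress or by the SettingsStore salt/private-key constants, with values fixed at 32 bytes by constantValueSizeBySample. A minimal usage sketch, assuming a kotlin-logging KLogger named `logger` is available; normally the store is wrapped in a SettingsStore via ChronicleMapStore.type("machine.db").create(logger), which is not exercised here:

```
import dorkbox.network.storage.types.ChronicleMapStore
import java.io.File
import java.net.InetAddress
import mu.KLogger

// Hypothetical, standalone use of the ChronicleMap-backed store.
fun chronicleMapExample(logger: KLogger) {
    val store = ChronicleMapStore(File("machine.db").absoluteFile, logger)
    val host = InetAddress.getByName("127.0.0.1")
    store.set(host, ByteArray(32))   // values are fixed at 32 bytes (constantValueSizeBySample)
    val bytes = store.get(host)      // null if the key is absent
    logger.info("read back ${bytes?.size} bytes")
    store.close()
}
```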

View File

@@ -0,0 +1,247 @@
/*
* Copyright 2021 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.storage.types
import com.esotericsoftware.kryo.io.ByteBufferOutput
import com.esotericsoftware.kryo.io.Input
import dorkbox.serializers.SerializationManager
import dorkbox.storage.Storage
import mu.KLogger
import java.io.File
import java.io.FileInputStream
import java.io.FileOutputStream
import java.io.IOException
import java.util.*
import java.util.concurrent.ConcurrentHashMap
/**
* Java property files
*/
internal class PropertyStore(
private val dbFile: File,
private val autoLoad: Boolean,
private val readOnly: Boolean,
private val readOnlyViolent: Boolean,
private val serializationManager: SerializationManager<*>,
private val logger: KLogger
) : Storage() {
companion object {
private val comparator = Comparator<Any> { o1, o2 -> o1.toString().compareTo(o2.toString()) }
}
private val thread = Thread { close() }
@Volatile
private var lastModifiedTime = 0L
private var output = ByteBufferOutput(64, 65535) // A reasonable max size of an object. We don't want to support it being TOO big
private var input = Input()
private val loadedProps = ConcurrentHashMap<String, Any>()
init {
load()
// Make sure that the timer is run on shutdown. A HARD shutdown will just POW! kill it, a "nice" shutdown will run the hook
Runtime.getRuntime().addShutdownHook(thread)
logger.info("Property file storage initialized at: '$dbFile'")
}
private fun load() {
// if we cannot load, then we create a properties file.
if (!dbFile.canRead() && !dbFile.parentFile.mkdirs() && !dbFile.createNewFile()) {
throw IOException("Cannot create file")
}
try {
synchronized(dbFile) {
FileInputStream(dbFile).use { fileStream ->
val properties = Properties()
properties.load(fileStream)
lastModifiedTime = dbFile.lastModified()
properties.entries.forEach { (k, v) ->
val key = k as String
val value = Base64.getDecoder().decode(v as String)
input.reset()
input.buffer = value
try {
val valueObject = serializationManager.readFullClassAndObject(input)
if (valueObject != null) {
loadedProps[key] = valueObject
} else {
logger.error("Unable to parse property (file: $dbFile) $key : $value")
}
} catch (e: Exception) {
logger.error("Unable to parse property (file: $dbFile) $key : $value", e)
}
}
properties.clear()
}
}
} catch (e: IOException) {
logger.error("Cannot load properties!", e)
}
}
private fun save() {
if (readOnly) {
// don't accidentally save this!
return
}
// if we cannot save, then we create a NEW properties file. It could have been DELETED out from under us (while in use!)
if (!dbFile.canRead() && !dbFile.parentFile.mkdirs() && !dbFile.createNewFile()) {
throw IOException("Cannot create file")
}
try {
synchronized(dbFile) {
val properties = object : Properties() {
override fun keys(): Enumeration<Any> {
val keysEnum = super.keys()
val vector = Vector<Any>(size())
while (keysEnum.hasMoreElements()) {
vector.add(keysEnum.nextElement())
}
vector.sortWith(comparator)
return vector.elements()
}
}
loadedProps.forEach { (key, value) ->
// have to serialize the value
output.reset()
try {
serializationManager.writeFullClassAndObject(output, value)
output.byteBuffer.flip()
val valueEncoded = Base64.getEncoder().encode(output.byteBuffer)
properties[key] = String(valueEncoded.array(), 0, valueEncoded.limit())
} catch (e: Exception) {
logger.error("Unable to save property: $key $value")
}
}
FileOutputStream(dbFile, false).use { fos ->
properties.store(fos, "Storage data, Version: $version")
fos.flush()
properties.clear()
lastModifiedTime = dbFile.lastModified()
}
}
} catch (e: IOException) {
logger.error("Properties cannot save to: $dbFile", e)
}
}
override fun file(): File {
return dbFile
}
override fun setVersion(version: Int) {
super.setVersion(version)
}
override fun size(): Int {
return loadedProps.size
}
override fun contains(key: String): Boolean {
if (autoLoad) {
// we want to check the last modified time when getting, because if we edit the on-disk file, we want to load those changes
val lastModifiedTime = dbFile.lastModified()
if (this.lastModifiedTime != lastModifiedTime) {
// we want to reload the info
load()
}
}
return loadedProps[key] != null
}
override operator fun <T> get(key: String): T? {
if (autoLoad) {
// we want to check the last modified time when getting, because if we edit the on-disk file, we want to load those changes
val lastModifiedTime = dbFile.lastModified()
if (this.lastModifiedTime != lastModifiedTime) {
// we want to reload the info
load()
}
}
val any = loadedProps[key]
if (any != null) {
@Suppress("UNCHECKED_CAST")
return any as T
}
return null
}
override operator fun set(key: String, data: Any?) {
if (readOnly) {
if (readOnlyViolent) {
throw IOException("Unable to save data in $dbFile for $key : $data")
} else {
return
}
}
val hasChanged = if (data == null) {
loadedProps.remove(key) != null
} else {
val prev = loadedProps.put(key, data)
prev !== data
}
// every time we set info, we want to save it to disk (so the file on disk will ALWAYS be current, and so we can modify it as we choose)
if (hasChanged) {
save()
}
}
/**
* Deletes all contents of this storage, and if applicable, its location on disk.
*/
override fun deleteAll() {
if (readOnly) {
if (readOnlyViolent) {
throw IOException("Unable to delete all data in $dbFile")
} else {
return
}
}
loadedProps.clear()
dbFile.delete()
}
/**
* Closes this storage (and if applicable, flushes its content to disk)
*/
override fun close() {
Runtime.getRuntime().removeShutdownHook(thread)
save()
}
}
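The load() and save() methods above define the on-disk format: an ordinary java.util.Properties file whose values are Kryo-serialized objects (writeFullClassAndObject) stored as Base64 strings. A minimal illustration of just that encoding round trip, using plain JDK Base64 with stand-in bytes in place of the real SerializationManager output:

```
import java.util.Base64
import java.util.Properties

// Illustration of the stored value format only; four literal bytes stand in for Kryo output.
fun propertyFormatExample() {
    val props = Properties()
    val serialized = byteArrayOf(1, 2, 3, 4)                              // stand-in for writeFullClassAndObject(...)
    props["lastRun"] = Base64.getEncoder().encodeToString(serialized)     // what save() writes
    val decoded = Base64.getDecoder().decode(props["lastRun"] as String)  // what load() reads back
    check(decoded.contentEquals(serialized))
}
```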

View File

@@ -0,0 +1,125 @@
/*
* Copyright 2020 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.network.storage.types
import dorkbox.network.storage.GenericStore
import dorkbox.network.storage.SettingsStore
import dorkbox.network.storage.StorageType
import mu.KLogger
import org.lmdbjava.ByteArrayProxy
import org.lmdbjava.Dbi
import org.lmdbjava.DbiFlags
import org.lmdbjava.Env
import org.lmdbjava.EnvFlags
import java.io.File
import java.net.InetAddress
/**
* Lightning Memory Database
*
* https://github.com/lmdbjava/lmdbjava
*/
class LmdbStore(val dbFile: File, val logger: KLogger): GenericStore {
companion object {
fun type(dbFile: String) : StorageType {
return type(File(dbFile))
}
fun type(dbFile: File) = object : StorageType {
override fun create(logger: KLogger): SettingsStore {
return SettingsStore(logger, LmdbStore(dbFile.absoluteFile, logger))
}
}
}
private val env: Env<ByteArray>
private val db: Dbi<ByteArray>
init {
val prep = Env.create(ByteArrayProxy.PROXY_BA).setMapSize(1048760).setMaxDbs(1)
env = if (dbFile.isDirectory) {
prep.open(dbFile)
}
else {
// The database lock file is the path with "-lock" appended.
prep.open(dbFile, EnvFlags.MDB_NOSUBDIR)
}
db = env.openDbi("machine-keys", DbiFlags.MDB_CREATE)
logger.info("LMDB storage initialized at: '$dbFile'")
}
// byte 0 is SALT
private val saltBuffer = ByteArray(1) {0}
// byte 1 is private key
private val privateKeyBuffer = ByteArray(1) {1}
private fun getBytes(key: Any): ByteArray {
return when (key) {
is InetAddress -> key.address
SettingsStore.saltKey -> saltBuffer
SettingsStore.privateKey -> privateKeyBuffer
else -> throw IllegalArgumentException("Unable to manage property: $key")
}
}
override fun get(key: Any): ByteArray? {
val keyBytes = getBytes(key)
return env.txnRead().use { txn ->
db.get(txn, keyBytes) ?: return null
}
}
/**
* Setting to NULL removes it
*/
@Suppress("DuplicatedCode")
override fun set(key: Any, bytes: ByteArray?) {
val keyBytes = getBytes(key)
if (bytes == null) {
env.txnWrite().use { txn ->
db.delete(txn, keyBytes)
// An explicit commit is required, otherwise Txn.close() rolls it back.
txn.commit()
}
}
else {
env.txnWrite().use { txn ->
try {
db.put(txn, keyBytes, bytes)
} catch (e: Exception) {
logger.error("Unable to save to LMDB!", e)
}
// An explicit commit is required, otherwise Txn.close() rolls it back.
txn.commit()
}
}
}
override fun close() {
db.close()
env.close()
}
}
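As the init block shows, a directory path opens LMDB in its default layout (data.mdb and lock.mdb inside the directory), while a plain file path opens it with MDB_NOSUBDIR, where the lock file is the path with "-lock" appended. A small sketch of both modes, assuming a KLogger named `logger`; this bypasses the SettingsStore wrapper that type(...).create(logger) would normally return:

```
import dorkbox.network.storage.SettingsStore
import dorkbox.network.storage.types.LmdbStore
import java.io.File
import mu.KLogger

// Hypothetical: one store backed by a directory, one backed by a single file.
fun lmdbExample(logger: KLogger) {
    val dirBacked = LmdbStore(File("lmdb-data").apply { mkdirs() }, logger)  // data.mdb + lock.mdb inside
    val fileBacked = LmdbStore(File("settings.db"), logger)                  // settings.db + settings.db-lock
    dirBacked.set(SettingsStore.saltKey, ByteArray(32))
    dirBacked.close()
    fileBacked.close()
}
```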

View File

@@ -0,0 +1,54 @@
/*
* Copyright 2020 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.network.storage.types
import dorkbox.network.storage.GenericStore
import dorkbox.network.storage.SettingsStore
import dorkbox.network.storage.StorageType
import mu.KLogger
import org.agrona.collections.Object2ObjectHashMap
/**
* In-Memory store
*/
object MemoryStore {
fun type() = object : StorageType {
override fun create(logger: KLogger): SettingsStore {
return SettingsStore(logger, MemoryAccess(logger))
}
}
}
class MemoryAccess(val logger: KLogger): GenericStore {
private val map = Object2ObjectHashMap<Any, ByteArray>()
init {
logger.info("Memory storage initialized")
}
override fun get(key: Any): ByteArray? {
return map[key]
}
override fun set(key: Any, bytes: ByteArray?) {
// setting to NULL removes the entry, matching the other store implementations
if (bytes == null) map.remove(key) else map[key] = bytes
}
override fun close() {
}
}
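Every backend in this commit is obtained through the same StorageType factory, so swapping the in-memory store for a persistent one is a one-line change. A sketch of that factory call, assuming a KLogger named `logger`; what SettingsStore exposes beyond construction is not part of this commit:

```
import dorkbox.network.storage.SettingsStore
import dorkbox.network.storage.types.MemoryStore
import mu.KLogger

// Hypothetical: the call shape is identical for the persistent backends,
// e.g. LmdbStore.type("settings.db") or ChronicleMapStore.type("machine.db").
fun memoryExample(logger: KLogger): SettingsStore = MemoryStore.type().create(logger)
```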

View File

@@ -0,0 +1,169 @@
/*
* Copyright 2020 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.network.storage.types
import dorkbox.netUtil.IP
import dorkbox.network.storage.GenericStore
import dorkbox.network.storage.SettingsStore
import dorkbox.network.storage.StorageType
import dorkbox.util.Sys
import dorkbox.util.properties.SortedProperties
import mu.KLogger
import org.agrona.collections.Object2ObjectHashMap
import java.io.File
import java.io.FileInputStream
import java.io.FileOutputStream
import java.io.IOException
import java.net.InetAddress
import java.util.*
/**
* Java property files
*/
class PropertyStore(val dbFile: File, val logger: KLogger): GenericStore {
companion object {
fun type(dbFile: String) : StorageType {
return type(File(dbFile))
}
fun type(dbFile: File) = object : StorageType {
override fun create(logger: KLogger): SettingsStore {
return SettingsStore(logger, PropertyStore (dbFile, logger))
}
}
}
@Volatile
private var lastModifiedTime = 0L
private val loadedProps = Object2ObjectHashMap<Any, ByteArray>()
init {
load()
logger.info("Property file storage initialized at: '$dbFile'")
}
private fun load() {
// if we cannot load, then we create a properties file.
if (!dbFile.canRead() && !dbFile.createNewFile()) {
throw IOException("Cannot create file")
}
val input = FileInputStream(dbFile)
try {
val properties = Properties()
properties.load(input)
lastModifiedTime = dbFile.lastModified()
properties.entries.forEach {
val key = it.key as String
val value = it.value as String
when (key) {
SettingsStore.saltKey -> loadedProps[SettingsStore.saltKey] = Sys.hexToBytes(value)
SettingsStore.privateKey -> loadedProps[SettingsStore.privateKey] = Sys.hexToBytes(value)
else -> {
val address: InetAddress? = IP.fromString(key)
if (address != null) {
loadedProps[address] = Sys.hexToBytes(value)
} else {
logger.error("Unable to parse property file: $dbFile $key $value")
}
}
}
}
properties.clear()
} catch (e: IOException) {
logger.error("Cannot load properties!", e)
e.printStackTrace()
} finally {
input.close()
}
}
override operator fun get(key: Any): ByteArray? {
// we want to check the last modified time when getting, because if we edit the on-disk file, we want to load those changes
val lastModifiedTime = dbFile.lastModified()
if (this.lastModifiedTime != lastModifiedTime) {
// we want to reload the info
load()
}
val any = loadedProps[key]
if (any != null) {
return any
}
return null
}
/**
* Setting to NULL removes it
*/
override operator fun set(key: Any, bytes: ByteArray?) {
val hasChanged = if (bytes == null) {
loadedProps.remove(key) != null
} else {
val prev = loadedProps.put(key, bytes)
!prev.contentEquals(bytes)
}
// every time we set info, we want to save it to disk (so the file on disk will ALWAYS be current, and so we can modify it as we choose)
if (hasChanged) {
save()
}
}
fun save() {
var fos: FileOutputStream? = null
try {
fos = FileOutputStream(dbFile, false)
val properties = SortedProperties()
loadedProps.forEach { (key, value) ->
when (key) {
"_salt" -> properties[key] = Sys.bytesToHex(value)
"_private" -> properties[key] = Sys.bytesToHex(value)
is InetAddress -> properties[IP.toString(key)] = Sys.bytesToHex(value)
else -> logger.error("Unable to parse property [$key] $value")
}
}
properties.store(fos, "Server salt, public/private keys, and remote computer public Keys")
fos.flush()
properties.clear()
lastModifiedTime = dbFile.lastModified()
} catch (e: IOException) {
logger.error("Properties cannot save to: $dbFile", e)
} finally {
if (fos != null) {
try {
fos.close()
} catch (ignored: IOException) {
}
}
}
}
override fun close() {
save()
}
}

View File

@@ -0,0 +1,2 @@
package dorkbox.storage.types

View File

@@ -0,0 +1,2 @@
package dorkbox.storage.wip

View File

@@ -0,0 +1,2 @@
package dorkbox.storage.wip

View File

@@ -0,0 +1,486 @@
/*-
* #%L
* LmdbJava
* %%
* Copyright (C) 2016 - 2020 The LmdbJava Open Source Project
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package dorkboxTest.network.lmdb
import org.agrona.MutableDirectBuffer
import org.agrona.concurrent.UnsafeBuffer
import org.hamcrest.CoreMatchers
import org.hamcrest.MatcherAssert
import org.junit.Assert
import org.junit.Ignore
import org.junit.Rule
import org.junit.Test
import org.junit.rules.TemporaryFolder
import org.lmdbjava.ByteBufferProxy
import org.lmdbjava.DbiFlags
import org.lmdbjava.DirectBufferProxy
import org.lmdbjava.Env
import org.lmdbjava.GetOp
import org.lmdbjava.KeyRange
import org.lmdbjava.SeekOp
import org.lmdbjava.Verifier
import java.io.File
import java.io.IOException
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.concurrent.Executors
import java.util.concurrent.TimeUnit
/**
* Welcome to LmdbJava!
*
*
*
* This short tutorial will walk you through using LmdbJava step-by-step.
*
*
*
* If you are using a 64-bit Windows, Linux or OS X machine, you can simply run
* this tutorial by adding the LmdbJava JAR to your classpath. It includes the
* required system libraries. If you are using another 64-bit platform, you'll
* need to install the LMDB system library yourself. 32-bit platforms are not
* supported.
*/
@Ignore
class TutorialTest {
private val folder = TemporaryFolder()
@Rule
fun tmp(): TemporaryFolder {
return folder
}
/**
* In this first tutorial we will use LmdbJava with some basic defaults.
*
* @throws IOException if a path was unavailable for memory mapping
*/
@Test
@Throws(IOException::class)
fun tutorial1() {
// We need a storage directory first.
// The path cannot be on a remote file system.
val path = tmp().newFolder()
// We always need an Env. An Env owns a physical on-disk storage file. One
// Env can store many different databases (ie sorted maps).
val env = Env.create() // LMDB also needs to know how large our DB might be. Over-estimating is OK.
.setMapSize(10485760) // LMDB also needs to know how many DBs (Dbi) we want to store in this Env.
.setMaxDbs(1) // Now let's open the Env. The same path can be concurrently opened and
// used in different processes, but do not open the same path twice in
// the same process at the same time.
.open(path)
// We need a Dbi for each DB. A Dbi roughly equates to a sorted map. The
// MDB_CREATE flag causes the DB to be created if it doesn't already exist.
val db = env.openDbi(DB_NAME, DbiFlags.MDB_CREATE)
// We want to store some data, so we will need a direct ByteBuffer.
// Note that LMDB keys cannot exceed maxKeySize bytes (511 bytes by default).
// Values can be larger.
val key = ByteBuffer.allocateDirect(env.maxKeySize)
val `val` = ByteBuffer.allocateDirect(700)
key.put("greeting".toByteArray(StandardCharsets.UTF_8)).flip()
`val`.put("Hello world".toByteArray(StandardCharsets.UTF_8)).flip()
val valSize = `val`.remaining()
// Now store it. Dbi.put() internally begins and commits a transaction (Txn).
db.put(key, `val`)
// To fetch any data from LMDB we need a Txn. A Txn is very important in
// LmdbJava because it offers ACID characteristics and internally holds a
// read-only key buffer and read-only value buffer. These read-only buffers
// are always the same two Java objects, but point to different LMDB-managed
// memory as we use Dbi (and Cursor) methods. These read-only buffers remain
// valid only until the Txn is released or the next Dbi or Cursor call. If
// you need data afterwards, you should copy the bytes to your own buffer.
env.txnRead().use { txn ->
val found = db[txn, key]
Assert.assertNotNull(found)
// The fetchedVal is read-only and points to LMDB memory
val fetchedVal = txn.`val`()
MatcherAssert.assertThat(fetchedVal.remaining(), CoreMatchers.`is`(valSize))
// Let's double-check the fetched value is correct
MatcherAssert.assertThat(StandardCharsets.UTF_8.decode(fetchedVal).toString(), CoreMatchers.`is`("Hello world"))
}
// We can also delete. The simplest way is to let Dbi allocate a new Txn...
db.delete(key)
env.txnRead().use { txn -> Assert.assertNull(db[txn, key]) }
env.close()
}
/**
* In this second tutorial we'll learn more about LMDB's ACID Txns.
*
* @throws IOException if a path was unavailable for memory mapping
* @throws InterruptedException if executor shutdown interrupted
*/
@Test
@Throws(IOException::class, InterruptedException::class)
fun tutorial2() {
val env = createSimpleEnv(tmp().newFolder())
val db = env.openDbi(DB_NAME, DbiFlags.MDB_CREATE)
val key = ByteBuffer.allocateDirect(env.maxKeySize)
val `val` = ByteBuffer.allocateDirect(700)
// Let's write and commit "key1" via a Txn. A Txn can include multiple Dbis.
// Note write Txns block other write Txns, due to writes being serialized.
// It's therefore important to avoid unnecessarily long-lived write Txns.
env.txnWrite().use { txn ->
key.put("key1".toByteArray(StandardCharsets.UTF_8)).flip()
`val`.put("lmdb".toByteArray(StandardCharsets.UTF_8)).flip()
db.put(txn, key, `val`)
// We can read data too, even though this is a write Txn.
val found = db[txn, key]
Assert.assertNotNull(found)
// An explicit commit is required, otherwise Txn.close() rolls it back.
txn.commit()
}
// Open a read-only Txn. It only sees data that existed at Txn creation time.
val rtx = env.txnRead()
// Our read Txn can fetch key1 without problem, as it existed at Txn creation.
var found = db[rtx, key]
Assert.assertNotNull(found)
// Note that our main test thread holds the Txn. Only one Txn per thread is
// typically permitted (the exception is a read-only Env with MDB_NOTLS).
//
// Let's write out a "key2" via a new write Txn in a different thread.
val es = Executors.newCachedThreadPool()
es.execute {
env.txnWrite().use { txn ->
key.clear()
key.put("key2".toByteArray(StandardCharsets.UTF_8)).flip()
db.put(txn, key, `val`)
txn.commit()
}
}
es.shutdown()
es.awaitTermination(10, TimeUnit.SECONDS)
// Even though key2 has been committed, our read Txn still can't see it.
found = db[rtx, key]
Assert.assertNull(found)
// To see key2, we could create a new Txn. But a reset/renew is much faster.
// Reset/renew is also important to avoid long-lived read Txns, as these
// prevent the re-use of free pages by write Txns (ie the DB will grow).
rtx.reset()
// ... potentially long operation here ...
rtx.renew()
found = db[rtx, key]
Assert.assertNotNull(found)
// Don't forget to close the read Txn now we're completely finished. We could
// have avoided this if we used a try-with-resources block, but we wanted to
// play around with multiple concurrent Txns to demonstrate the "I" in ACID.
rtx.close()
env.close()
}
/**
* In this third tutorial we'll have a look at the Cursor. Up until now we've
* just used Dbi, which is good enough for simple cases but unsuitable if you
* don't know the key to fetch, or want to iterate over all the data etc.
*
* @throws IOException if a path was unavailable for memory mapping
*/
@Test
@Throws(IOException::class)
fun tutorial3() {
val env = createSimpleEnv(tmp().newFolder())
val db = env.openDbi(DB_NAME, DbiFlags.MDB_CREATE)
val key = ByteBuffer.allocateDirect(env.maxKeySize)
val `val` = ByteBuffer.allocateDirect(700)
env.txnWrite().use { txn ->
// A cursor always belongs to a particular Dbi.
val c = db.openCursor(txn)
// We can put via a Cursor. Note we're adding keys in a strange order,
// as we want to show you that LMDB returns them in sorted order.
key.put("zzz".toByteArray(StandardCharsets.UTF_8)).flip()
`val`.put("lmdb".toByteArray(StandardCharsets.UTF_8)).flip()
c.put(key, `val`)
key.clear()
key.put("aaa".toByteArray(StandardCharsets.UTF_8)).flip()
c.put(key, `val`)
key.clear()
key.put("ccc".toByteArray(StandardCharsets.UTF_8)).flip()
c.put(key, `val`)
// We can read from the Cursor by key.
c[key, GetOp.MDB_SET]
MatcherAssert.assertThat(StandardCharsets.UTF_8.decode(c.key()).toString(), CoreMatchers.`is`("ccc"))
// Let's see that LMDB provides the keys in appropriate order....
c.seek(SeekOp.MDB_FIRST)
MatcherAssert.assertThat(StandardCharsets.UTF_8.decode(c.key()).toString(), CoreMatchers.`is`("aaa"))
c.seek(SeekOp.MDB_LAST)
MatcherAssert.assertThat(StandardCharsets.UTF_8.decode(c.key()).toString(), CoreMatchers.`is`("zzz"))
c.seek(SeekOp.MDB_PREV)
MatcherAssert.assertThat(StandardCharsets.UTF_8.decode(c.key()).toString(), CoreMatchers.`is`("ccc"))
// Cursors can also delete the current key.
c.delete()
c.close()
txn.commit()
}
// A read-only Cursor can survive its original Txn being closed. This is
// useful if you want to close the original Txn (eg maybe you created the
// Cursor during the constructor of a singleton with a throw-away Txn). Of
// course, you cannot use the Cursor if its Txn is closed or currently reset.
val tx1 = env.txnRead()
val c = db.openCursor(tx1)
tx1.close()
// The Cursor becomes usable again by "renewing" it with an active read Txn.
val tx2 = env.txnRead()
c.renew(tx2)
c.seek(SeekOp.MDB_FIRST)
// As usual with read Txns, we can reset and renew them. The Cursor does
// not need any special handling if we do this.
tx2.reset()
// ... potentially long operation here ...
tx2.renew()
c.seek(SeekOp.MDB_LAST)
tx2.close()
env.close()
}
/**
* In this fourth tutorial we'll take a quick look at the iterators. These are
* a more Java idiomatic form of using the Cursors we looked at in tutorial 3.
*
* @throws IOException if a path was unavailable for memory mapping
*/
@Test
@Throws(IOException::class)
fun tutorial4() {
val env = createSimpleEnv(tmp().newFolder())
val db = env.openDbi(DB_NAME, DbiFlags.MDB_CREATE)
env.txnWrite().use { txn ->
val key = ByteBuffer.allocateDirect(env.maxKeySize)
val `val` = ByteBuffer.allocateDirect(700)
// Insert some data. Note that ByteBuffer order defaults to Big Endian.
// LMDB does not persist the byte order, but it's critical to sort keys.
// If your numeric keys don't sort as expected, review buffer byte order.
`val`.putInt(100)
key.putInt(1)
db.put(txn, key, `val`)
key.clear()
key.putInt(2)
db.put(txn, key, `val`)
key.clear()
// Each iterable uses a cursor and must be closed when finished. Iterate
// forward in terms of key ordering starting with the first key.
db.iterate(txn, KeyRange.all()).use { ci ->
for (kv in ci) {
MatcherAssert.assertThat(kv.key(), CoreMatchers.notNullValue())
MatcherAssert.assertThat(kv.`val`(), CoreMatchers.notNullValue())
}
}
// Iterate backward in terms of key ordering starting with the last key.
db.iterate(txn, KeyRange.allBackward()).use { ci ->
for (kv in ci) {
MatcherAssert.assertThat(kv.key(), CoreMatchers.notNullValue())
MatcherAssert.assertThat(kv.`val`(), CoreMatchers.notNullValue())
}
}
// There are many ways to control the desired key range via KeyRange, such
// as arbitrary start and stop values, direction etc. We've adopted Guava's
// terminology for our range classes (see KeyRangeType for further details).
key.putInt(1)
val range = KeyRange.atLeastBackward(key)
db.iterate(txn, range).use { ci ->
for (kv in ci) {
MatcherAssert.assertThat(kv.key(), CoreMatchers.notNullValue())
MatcherAssert.assertThat(kv.`val`(), CoreMatchers.notNullValue())
}
}
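// Beyond atLeastBackward above, the other KeyRange factories follow the same pattern.
// A hedged sketch, not executed here (startKey and stopKey would be pre-filled buffers):
//   db.iterate(txn, KeyRange.closed(startKey, stopKey)).use { ci ->
//       for (kv in ci) { /* visits keys between startKey and stopKey, inclusive */ }
//   }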
}
env.close()
}
/**
* In this fifth tutorial we'll explore multiple values sharing a single key.
*
* @throws IOException if a path was unavailable for memory mapping
*/
@Test
@Throws(IOException::class)
fun tutorial5() {
val env = createSimpleEnv(tmp().newFolder())
// This time we're going to tell the Dbi it can store > 1 value per key.
// There are other flags available if we're storing integers etc.
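// (For instance, a hedged sketch not used here: a Dbi of fixed-size integer keys could be
//  opened with env.openDbi(DB_NAME, DbiFlags.MDB_CREATE, DbiFlags.MDB_INTEGERKEY).)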
val db = env.openDbi(DB_NAME, DbiFlags.MDB_CREATE, DbiFlags.MDB_DUPSORT)
// Duplicate support requires both keys and values to be <= max key size.
val key = ByteBuffer.allocateDirect(env.maxKeySize)
val `val` = ByteBuffer.allocateDirect(env.maxKeySize)
env.txnWrite().use { txn ->
val c = db.openCursor(txn)
// Store one key, but many values, and in non-natural order.
key.put("key".toByteArray(StandardCharsets.UTF_8)).flip()
`val`.put("xxx".toByteArray(StandardCharsets.UTF_8)).flip()
c.put(key, `val`)
`val`.clear()
`val`.put("kkk".toByteArray(StandardCharsets.UTF_8)).flip()
c.put(key, `val`)
`val`.clear()
`val`.put("lll".toByteArray(StandardCharsets.UTF_8)).flip()
c.put(key, `val`)
// Cursor can tell us how many values the current key has.
val count = c.count()
MatcherAssert.assertThat(count, CoreMatchers.`is`(3L))
// Let's position the Cursor. Note sorting still works.
c.seek(SeekOp.MDB_FIRST)
MatcherAssert.assertThat(StandardCharsets.UTF_8.decode(c.`val`()).toString(), CoreMatchers.`is`("kkk"))
c.seek(SeekOp.MDB_LAST)
MatcherAssert.assertThat(StandardCharsets.UTF_8.decode(c.`val`()).toString(), CoreMatchers.`is`("xxx"))
c.seek(SeekOp.MDB_PREV)
MatcherAssert.assertThat(StandardCharsets.UTF_8.decode(c.`val`()).toString(), CoreMatchers.`is`("lll"))
c.close()
txn.commit()
}
env.close()
}
/**
* Next up we'll show you how to easily check that your platform (operating system
* and Java version) is working properly with LmdbJava and the embedded LMDB
* native library.
*
* @throws IOException if a path was unavailable for memory mapping
*/
@Test
@Throws(IOException::class)
fun tutorial6() {
// Note we need to specify the Verifier's DBI_COUNT for the Env.
val env = Env.create(ByteBufferProxy.PROXY_OPTIMAL).setMapSize(10485760).setMaxDbs(Verifier.DBI_COUNT).open(tmp().newFolder())
// Create a Verifier (it's a Callable<Long> for those needing full control).
val v = Verifier(env)
// We now run the verifier for 3 seconds; it raises an exception on failure.
// The method returns the number of entries it successfully verified.
v.runFor(3, TimeUnit.SECONDS)
env.close()
}
/**
* In this final tutorial we'll look at using Agrona's DirectBuffer.
*
* @throws IOException if a path was unavailable for memory mapping
*/
@Test
@Throws(IOException::class)
fun tutorial7() {
// The critical difference is we pass the PROXY_DB field to Env.create().
// There's also a PROXY_SAFE if you want to stop ByteBuffer's Unsafe use.
// Aside from that and a different type argument, it's the same as usual...
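// (A hedged aside: swapping in ByteBufferProxy.PROXY_SAFE below would avoid Unsafe-based
//  buffer access at some performance cost; the rest of this tutorial would be unchanged.)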
val env = Env.create(DirectBufferProxy.PROXY_DB).setMapSize(10485760).setMaxDbs(1).open(tmp().newFolder())
val db = env.openDbi(DB_NAME, DbiFlags.MDB_CREATE)
val keyBb = ByteBuffer.allocateDirect(env.maxKeySize)
val key: MutableDirectBuffer = UnsafeBuffer(keyBb)
val `val`: MutableDirectBuffer = UnsafeBuffer(ByteBuffer.allocateDirect(700))
env.txnWrite().use { txn ->
db.openCursor(txn).use { c ->
// Agrona is faster than ByteBuffer and its methods are nicer...
`val`.putStringWithoutLengthUtf8(0, "The Value")
key.putStringWithoutLengthUtf8(0, "yyy")
c.put(key, `val`)
key.putStringWithoutLengthUtf8(0, "ggg")
c.put(key, `val`)
c.seek(SeekOp.MDB_FIRST)
MatcherAssert.assertThat(c.key().getStringWithoutLengthUtf8(0, env.maxKeySize), CoreMatchers.startsWith("ggg"))
c.seek(SeekOp.MDB_LAST)
MatcherAssert.assertThat(c.key().getStringWithoutLengthUtf8(0, env.maxKeySize), CoreMatchers.startsWith("yyy"))
// DirectBuffer has no position concept. Often you don't want to store
// the unnecessary bytes of a varying-size buffer. Let's have a look...
val keyLen = key.putStringWithoutLengthUtf8(0, "12characters")
MatcherAssert.assertThat(keyLen, CoreMatchers.`is`(12))
MatcherAssert.assertThat(key.capacity(), CoreMatchers.`is`(env.maxKeySize))
// To only store the 12 characters, we simply call wrap:
key.wrap(key, 0, keyLen)
MatcherAssert.assertThat(key.capacity(), CoreMatchers.`is`(keyLen))
c.put(key, `val`)
c.seek(SeekOp.MDB_FIRST)
MatcherAssert.assertThat(c.key().capacity(), CoreMatchers.`is`(keyLen))
MatcherAssert.assertThat(c.key().getStringWithoutLengthUtf8(0, c.key().capacity()), CoreMatchers.`is`("12characters"))
// To store bigger values again, just wrap the original buffer.
key.wrap(keyBb)
MatcherAssert.assertThat(key.capacity(), CoreMatchers.`is`(env.maxKeySize))
}
txn.commit()
}
env.close()
}
// You've finished! There are lots of other neat things we could show you (eg
// how to speed up inserts by appending them in key order, using integer
// or reverse ordered keys, using Env.DISABLE_CHECKS_PROP etc), but you now
// know enough to tackle the JavaDocs with confidence. Have fun!
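// A final hedged sketch of the key-order append trick mentioned above (it assumes keys
// really are inserted in ascending order, otherwise LMDB rejects the put):
//   env.txnWrite().use { txn ->
//       db.put(txn, key, `val`, PutFlags.MDB_APPEND)
//       txn.commit()
//   }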
private fun createSimpleEnv(path: File): Env<ByteBuffer> {
return Env.create().setMapSize(10485760).setMaxDbs(1).setMaxReaders(1).open(path)
}
companion object {
private const val DB_NAME = "my DB"
}
}

View File

@ -0,0 +1,613 @@
/*
* Copyright 2021 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.storage
import com.esotericsoftware.kryo.Kryo
import com.esotericsoftware.kryo.io.Input
import com.esotericsoftware.kryo.io.Output
import org.junit.Assert
import org.junit.FixMethodOrder
import org.junit.Rule
import org.junit.Test
import org.junit.rules.TemporaryFolder
import org.junit.runners.MethodSorters
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
class StorageTest {
data class Tester(val key: String, val value: String)
class TesterSerializer : com.esotericsoftware.kryo.Serializer<Tester>() {
override fun write(kryo: Kryo, output: Output, `object`: Tester) {
output.writeString(`object`.key)
output.writeString(`object`.value)
}
override fun read(kryo: Kryo?, input: Input, type: Class<out Tester>): Tester {
return Tester(input.readString(), input.readString())
}
}
init {
System.setProperty("kryo.unsafe", "false")
}
private val folder = TemporaryFolder()
@Rule
fun tmp(): TemporaryFolder {
return folder
}
@Test
fun memoryStoreDB() {
val tester = Tester("key1", "value1")
// note: we want to auto-close the storage after we write/read it
val storage = Storage.Memory().register(Tester::class.java, TesterSerializer()).build()
storage.use {
it["key1"] = tester
}
val tes: Tester? = storage.use {
it["key1"]
}
Assert.assertTrue(tes == tester)
}
@Test
fun propertyStoreDB() {
val tmp = tmp().newFile()
val tester = Tester("key1", "value1")
// note: we want to auto-close the storage after we write/read it
val storage = Storage.Property().file(tmp).register(Tester::class.java, TesterSerializer()).build()
storage.use {
it["key1"] = tester
}
val storage2 = Storage.Property().file(tmp).register(Tester::class.java, TesterSerializer()).build()
val tes: Tester? = storage2.use {
it["key1"]
}
Assert.assertTrue(tes == tester)
}
@Test
fun propertyStoreReadOnlyDB() {
System.setProperty("kryo.unsafe", "false")
val tmp = tmp().newFile()
// note: we want to auto-close the storage after we write/read it
val storage = Storage.Property().file(tmp).readOnly().build()
try {
storage.use {
it["key1"] = 0
}
Assert.fail("no exception thrown!")
} catch (e: Exception) {
e.printStackTrace()
}
}
// @Test
// fun testCreateDB() {
// // val store = Storage.Memory().build()
// StorageSystem.shutdown()
// StorageSystem.delete(TEST_DB)
// var storage: Storage = StorageSystem.Disk().file(TEST_DB).build()
// val numberOfRecords1 = storage.size()
// val size1: Long = storage.getFileSize()
// Assert.assertEquals("count is not correct", 0, numberOfRecords1.toLong())
// Assert.assertEquals("size is not correct", initialSize, size1)
// storage.close()
// storage = StorageSystem.Disk().file(TEST_DB).build()
// val numberOfRecords2 = storage.size()
// val size2: Long = storage.getFileSize()
// Assert.assertEquals("Record count is not the same", numberOfRecords1.toLong(), numberOfRecords2.toLong())
// Assert.assertEquals("size is not the same", size1, size2)
// StorageSystem.close(storage)
// }
// @Test
// fun testAddAsOne() {
// try {
// var storage: Storage = StorageSystem.Disk().file(TEST_DB).build()
// for (i in 0 until total) {
// add(storage, i)
// }
// StorageSystem.close(storage)
// storage = StorageSystem.Disk().file(TEST_DB).build()
// for (i in 0 until total) {
// val record1Data = createData(i)
// val readRecord = readRecord(storage, i)
// Assert.assertEquals("Object is not the same", record1Data, readRecord)
// }
// StorageSystem.close(storage)
// } catch (e: Exception) {
// e.printStackTrace()
// Assert.fail("Error!")
// }
// }
//
// /**
// * Adds data to storage using the SAME key each time (so each entry is overwritten).
// */
// @Test
// fun testAddNoKeyRecords() {
// try {
// var storage: Storage = StorageSystem.Disk().file(TEST_DB).build()
// val storageKey = StorageKey("foobar!")
// for (i in 0 until total) {
// log("adding record $i...")
// val addRecord = createData(i)
// storage.put(storageKey, addRecord)
// log("reading record $i...")
// val readData: String = storage.get(storageKey)!!
// Assert.assertEquals("Object is not the same", addRecord, readData)
// }
// StorageSystem.close(storage)
// storage = StorageSystem.Disk().file(TEST_DB).build()
// val dataCheck = createData(total - 1)
// log("reading record " + (total - 1) + "...")
// val readData: String = storage.get(storageKey)
//
// // the ONLY entry in storage should be the last one that we added
// Assert.assertEquals("Object is not the same", dataCheck, readData)
// val numberOfRecords1 = storage.size()
// val size1: Long = storage.getFileSize()
// Assert.assertEquals("count is not correct", numberOfRecords1.toLong(), 1)
// Assert.assertEquals("size is not correct", size1, initialSize + sizePerRecord)
// StorageSystem.close(storage)
// } catch (e: Exception) {
// e.printStackTrace()
// Assert.fail("Error!")
// }
// }
//
// @Test
// fun testAddRecords_DelaySaveA() {
// try {
// var storage: Storage = StorageSystem.Disk().file(TEST_DB).build()
// for (i in 0 until total) {
// add(storage, i)
// }
// synchronized(Thread.currentThread()) {
// Thread.currentThread().wait(storage.getSaveDelay() + 1000L)
// }
// for (i in 0 until total) {
// val record1Data = createData(i)
// val readRecord = readRecord(storage, i)
// Assert.assertEquals("Object is not the same", record1Data, readRecord)
// }
// StorageSystem.close(storage)
// storage = StorageSystem.Disk().file(TEST_DB).build()
// for (i in 0 until total) {
// val dataCheck = createData(i)
// val readRecord = readRecord(storage, i)
// Assert.assertEquals("Object is not the same", dataCheck, readRecord)
// }
// StorageSystem.close(storage)
// } catch (e: Exception) {
// e.printStackTrace()
// Assert.fail("Error!")
// }
// }
//
// @Test
// fun testAddRecords_DelaySaveB() {
// try {
// var storage: Storage = StorageSystem.Disk().file(TEST_DB).build()
// for (i in 0 until total) {
// add(storage, i)
// }
// for (i in 0 until total) {
// val record1Data = createData(i)
// val readRecord = readRecord(storage, i)
// Assert.assertEquals("Object is not the same", record1Data, readRecord)
// }
// StorageSystem.close(storage)
// storage = StorageSystem.Disk().file(TEST_DB).build()
// for (i in 0 until total) {
// val dataCheck = createData(i)
// val readRecord = readRecord(storage, i)
// Assert.assertEquals("Object is not the same", dataCheck, readRecord)
// }
// StorageSystem.close(storage)
// } catch (e: Exception) {
// e.printStackTrace()
// Assert.fail("Error!")
// }
// }
//
// @Test
// fun testLoadRecords() {
// try {
// var storage: Storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
// for (i in 0 until total) {
// val addRecord = add(storage, i)
// val readRecord = readRecord(storage, i)
// Assert.assertEquals("Object is not the same", addRecord, readRecord)
// }
// StorageSystem.close(storage)
// storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
// for (i in 0 until total) {
// val dataCheck = createData(i)
// val readRecord = readRecord(storage, i)
// Assert.assertEquals("Object is not the same", dataCheck, readRecord)
// }
//
// // now test loading data
// val data = Data()
// val createKey: StorageKey = createKey(63)
// makeData(data)
// storage.put(createKey, data)
// var data2: Data
// data2 = storage.get(createKey, Data())
// Assert.assertEquals("Object is not the same", data, data2)
// StorageSystem.close(storage)
// storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
// data2 = storage.get(createKey, Data())
// Assert.assertEquals("Object is not the same", data, data2)
// StorageSystem.close(storage)
// } catch (e: Exception) {
// e.printStackTrace()
// Assert.fail("Error!")
// }
// }
//
// @Test
// @Throws(IOException::class)
// fun testAddRecordsDelete1Record() {
// if (total < 4) {
// throw IOException("Unable to run test with too few entries.")
// }
// try {
// var storage: Storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
// for (i in 0 until total) {
// val addRecord = add(storage, i)
// val readRecord = readRecord(storage, i)
// Assert.assertEquals("Object is not the same", addRecord, readRecord)
// }
// StorageSystem.close(storage)
// storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
// for (i in 0 until total) {
// val dataCheck = createData(i)
// val readRecord = readRecord(storage, i)
// Assert.assertEquals("Object is not the same", dataCheck, readRecord)
// }
//
// // make sure now that we can delete one of the records.
// deleteRecord(storage, 3)
// var readRecord = readRecord(storage, 9)
// var dataCheck = createData(9)
// Assert.assertEquals("Object is not the same", dataCheck, readRecord)
// if (storage.contains(createKey(3))) {
// Assert.fail("record NOT successfully deleted.")
// }
//
// // now we add 3 back
// val addRecord = add(storage, 3)
// dataCheck = createData(3)
// Assert.assertEquals("Object is not the same", dataCheck, addRecord)
// StorageSystem.close(storage)
// storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
//
// // check 9 again
// readRecord = readRecord(storage, 9)
// dataCheck = createData(9)
// Assert.assertEquals("Object is not the same", dataCheck, readRecord)
//
// // check 3 again
// readRecord = readRecord(storage, 3)
// dataCheck = createData(3)
// Assert.assertEquals("Object is not the same", dataCheck, readRecord)
// } catch (e: Exception) {
// e.printStackTrace()
// Assert.fail("Error!")
// }
// }
//
// @Test
// fun testUpdateRecords() {
// try {
// var storage: Storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
// for (i in 0 until total) {
// val addRecord = add(storage, i)
// val readRecord = readRecord(storage, i)
// Assert.assertEquals("Object is not the same", addRecord, readRecord)
// }
// StorageSystem.close(storage)
// storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
// var updateRecord = updateRecord(storage, 3, createData(3) + "new")
// var readRecord = readRecord(storage, 3)
// Assert.assertEquals("Object is not the same", updateRecord, readRecord)
// StorageSystem.close(storage)
// storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
// readRecord = readRecord(storage, 3)
// Assert.assertEquals("Object is not the same", updateRecord, readRecord)
// updateRecord = updateRecord(storage, 3, createData(3))
// StorageSystem.close(storage)
// storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
// readRecord = readRecord(storage, 3)
// Assert.assertEquals("Object is not the same", updateRecord, readRecord)
// StorageSystem.close(storage)
// storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
// updateRecord = updateRecord(storage, 0, createData(0) + "new")
// readRecord = readRecord(storage, 0)
// Assert.assertEquals("Object is not the same", updateRecord, readRecord)
// StorageSystem.close(storage)
// } catch (e: Exception) {
// e.printStackTrace()
// Assert.fail("Error!")
// }
// }
//
// @Test
// fun testSaveAllRecords() {
// try {
// var storage: Storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
// for (i in 0 until total) {
// val data = Data()
// makeData(data)
// val createKey: StorageKey = createKey(i)
// storage.put(createKey, data)
// }
// StorageSystem.close(storage)
// val data = Data()
// makeData(data)
// storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
// for (i in 0 until total) {
// val createKey: StorageKey = createKey(i)
// var data2: Data
// data2 = storage.get(createKey, Data())
// Assert.assertEquals("Object is not the same", data, data2)
// }
// StorageSystem.close(storage)
// } catch (e: Exception) {
// e.printStackTrace()
// Assert.fail("Error!")
// }
// }
//
// class Data {
// var string: String? = null
// var strings: Array<String?>
// var ints: IntArray
// var shorts: ShortArray
// var floats: FloatArray
// var doubles: DoubleArray
// var longs: LongArray
// var bytes: ByteArray
// var chars: CharArray
// var booleans: BooleanArray
// var Ints: Array<Int>
// var Shorts: Array<Short>
// var Floats: Array<Float>
// var Doubles: Array<Double>
// var Longs: Array<Long>
// var Bytes: Array<Byte>
// var Chars: Array<Char>
// var Booleans: Array<Boolean>
// override fun hashCode(): Int {
// val prime = 31
// var result = 1
// result = prime * result + Arrays.hashCode(Booleans)
// result = prime * result + Arrays.hashCode(Bytes)
// result = prime * result + Arrays.hashCode(Chars)
// result = prime * result + Arrays.hashCode(Doubles)
// result = prime * result + Arrays.hashCode(Floats)
// result = prime * result + Arrays.hashCode(Ints)
// result = prime * result + Arrays.hashCode(Longs)
// result = prime * result + Arrays.hashCode(Shorts)
// result = prime * result + Arrays.hashCode(booleans)
// result = prime * result + Arrays.hashCode(bytes)
// result = prime * result + Arrays.hashCode(chars)
// result = prime * result + Arrays.hashCode(doubles)
// result = prime * result + Arrays.hashCode(floats)
// result = prime * result + Arrays.hashCode(ints)
// result = prime * result + Arrays.hashCode(longs)
// result = prime * result + Arrays.hashCode(shorts)
// result = prime * result + if (string == null) 0 else string.hashCode()
// result = prime * result + Arrays.hashCode(strings)
// return result
// }
//
// override fun equals(obj: Any?): Boolean {
// if (this === obj) {
// return true
// }
// if (obj == null) {
// return false
// }
// if (javaClass != obj.javaClass) {
// return false
// }
// val other = obj as Data
// if (!Arrays.equals(Booleans, other.Booleans)) {
// return false
// }
// if (!Arrays.equals(Bytes, other.Bytes)) {
// return false
// }
// if (!Arrays.equals(Chars, other.Chars)) {
// return false
// }
// if (!Arrays.equals(Doubles, other.Doubles)) {
// return false
// }
// if (!Arrays.equals(Floats, other.Floats)) {
// return false
// }
// if (!Arrays.equals(Ints, other.Ints)) {
// return false
// }
// if (!Arrays.equals(Longs, other.Longs)) {
// return false
// }
// if (!Arrays.equals(Shorts, other.Shorts)) {
// return false
// }
// if (!Arrays.equals(booleans, other.booleans)) {
// return false
// }
// if (!Arrays.equals(bytes, other.bytes)) {
// return false
// }
// if (!Arrays.equals(chars, other.chars)) {
// return false
// }
// if (!Arrays.equals(doubles, other.doubles)) {
// return false
// }
// if (!Arrays.equals(floats, other.floats)) {
// return false
// }
// if (!Arrays.equals(ints, other.ints)) {
// return false
// }
// if (!Arrays.equals(longs, other.longs)) {
// return false
// }
// if (!Arrays.equals(shorts, other.shorts)) {
// return false
// }
// if (string == null) {
// if (other.string != null) {
// return false
// }
// } else if (string != other.string) {
// return false
// }
// return if (!Arrays.equals(strings, other.strings)) {
// false
// } else true
// }
//
// override fun toString(): String {
// return "Data"
// }
// }
//
// companion object {
// var total = 10
//
// // the initial size is specified during disk.storage construction, and is based on the number of padded records.
// private const val initialSize = 1024L
//
// // this is the size for each record (determined by looking at the output when writing the file)
// private const val sizePerRecord = 23
// private val TEST_DB = File("sampleFile.records")
// fun log(s: String?) {
// System.err.println(s)
// }
//
// private fun createData(number: Int): String {
// return "$number data for record # $number"
// }
//
// fun add(storage: Storage, number: Int): String {
// val record1Data = createData(number)
// val record1Key: StorageKey = createKey(number)
// log("adding record $number...")
// storage.put(record1Key, record1Data)
// return record1Data
// }
//
// fun readRecord(storage: Storage, number: Int): String {
// val record1Key: StorageKey = createKey(number)
// log("reading record $number...")
// val readData: String = storage.get(record1Key)!!
// log("\trecord $number data: '$readData'")
// return readData
// }
//
// fun deleteRecord(storage: Storage, nNumber: Int) {
// val record1Key: StorageKey = createKey(nNumber)
// log("deleting record $nNumber...")
// storage.delete(record1Key)
// }
//
// private fun updateRecord(storage: Storage, number: Int, newData: String): String {
// val record1Key: StorageKey = createKey(number)
// log("updating record $number...")
// storage.put(record1Key, newData)
// return newData
// }
//
// private fun createKey(number: Int): StorageKey {
// return StorageKey("foo$number")
// }
//
// // from kryo unit test.
// private fun makeData(data: Data) {
// val buffer = StringBuilder(128)
// for (i in 0..2) {
// buffer.append('a')
// }
// data.string = buffer.toString()
// data.strings = arrayOf("ab012", "", null, "!@#$", "\uFFFD\uFFFD\uFFFD\uFFFD\uFFFD") // non-ASCII sample garbled to replacement characters in this source
// data.ints = intArrayOf(-1234567, 1234567, -1, 0, 1, Int.MAX_VALUE, Int.MIN_VALUE)
// data.shorts = shortArrayOf(
// (-12345).toShort(),
// 12345.toShort(),
// (-1).toShort(),
// 0.toShort(),
// 1.toShort(),
// Short.MAX_VALUE,
// Short.MIN_VALUE
// )
// data.floats =
// floatArrayOf(0f, -0f, 1f, -1f, 123456f, -123456f, 0.1f, 0.2f, -0.3f, Math.PI.toFloat(), Float.MAX_VALUE, Float.MIN_VALUE)
// data.doubles =
// doubleArrayOf(0.0, -0.0, 1.0, -1.0, 123456.0, -123456.0, 0.1, 0.2, -0.3, Math.PI, Double.MAX_VALUE, Double.MIN_VALUE)
// data.longs = longArrayOf(0, -0, 1, -1, 123456, -123456, 99999999999L, -99999999999L, Long.MAX_VALUE, Long.MIN_VALUE)
// data.bytes = byteArrayOf((-123).toByte(), 123.toByte(), (-1).toByte(), 0.toByte(), 1.toByte(), Byte.MAX_VALUE, Byte.MIN_VALUE)
// data.chars =
// charArrayOf(32345.toChar(), 12345.toChar(), 0.toChar(), 1.toChar(), 63.toChar(), Character.MAX_VALUE, Character.MIN_VALUE)
// data.booleans = booleanArrayOf(true, false)
// data.Ints = arrayOf(-1234567, 1234567, -1, 0, 1, Int.MAX_VALUE, Int.MIN_VALUE)
// data.Shorts = arrayOf(-12345, 12345, -1, 0, 1, Short.MAX_VALUE, Short.MIN_VALUE)
// data.Floats = arrayOf(
// 0.0f,
// -0.0f,
// 1.0f,
// -1.0f,
// 123456.0f,
// -123456.0f,
// 0.1f,
// 0.2f,
// -0.3f,
// Math.PI.toFloat(),
// Float.MAX_VALUE,
// Float.MIN_VALUE
// )
// data.Doubles = arrayOf(0.0, -0.0, 1.0, -1.0, 123456.0, -123456.0, 0.1, 0.2, -0.3, Math.PI, Double.MAX_VALUE, Double.MIN_VALUE)
// data.Longs = arrayOf(0L, -0L, 1L, -1L, 123456L, -123456L, 99999999999L, -99999999999L, Long.MAX_VALUE, Long.MIN_VALUE)
// data.Bytes = arrayOf(-123, 123, -1, 0, 1, Byte.MAX_VALUE, Byte.MIN_VALUE)
// data.Chars = arrayOf(32345, 12345, 0, 1, 63, Character.MAX_VALUE, Character.MIN_VALUE)
// data.Booleans = arrayOf(true, false)
// }
// }
}

View File

@ -0,0 +1,535 @@
/*
* Copyright 2021 dorkbox, llc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dorkbox.storage
import dorkbox.storage.types.Storage
import org.junit.Assert
import org.junit.FixMethodOrder
import org.junit.Test
import org.junit.runners.MethodSorters
import java.io.File
import java.io.IOException
import java.lang.Exception
import java.lang.StringBuilder
import java.util.*
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
class StorageTest {
@Test
fun testCreateDB() {
// val store = Storage.Memory().build()
StorageSystem.shutdown()
StorageSystem.delete(TEST_DB)
var storage: Storage = StorageSystem.Disk().file(TEST_DB).build()
val numberOfRecords1 = storage.size()
val size1: Long = storage.getFileSize()
Assert.assertEquals("count is not correct", 0, numberOfRecords1.toLong())
Assert.assertEquals("size is not correct", initialSize, size1)
storage.close()
storage = StorageSystem.Disk().file(TEST_DB).build()
val numberOfRecords2 = storage.size()
val size2: Long = storage.getFileSize()
Assert.assertEquals("Record count is not the same", numberOfRecords1.toLong(), numberOfRecords2.toLong())
Assert.assertEquals("size is not the same", size1, size2)
StorageSystem.close(storage)
}
@Test
fun testAddAsOne() {
try {
var storage: Storage = StorageSystem.Disk().file(TEST_DB).build()
for (i in 0 until total) {
add(storage, i)
}
StorageSystem.close(storage)
storage = StorageSystem.Disk().file(TEST_DB).build()
for (i in 0 until total) {
val record1Data = createData(i)
val readRecord = readRecord(storage, i)
Assert.assertEquals("Object is not the same", record1Data, readRecord)
}
StorageSystem.close(storage)
} catch (e: Exception) {
e.printStackTrace()
Assert.fail("Error!")
}
}
/**
* Adds data to storage using the SAME key each time (so each entry is overwritten).
*/
@Test
fun testAddNoKeyRecords() {
try {
var storage: Storage = StorageSystem.Disk().file(TEST_DB).build()
val storageKey = StorageKey("foobar!")
for (i in 0 until total) {
log("adding record $i...")
val addRecord = createData(i)
storage.put(storageKey, addRecord)
log("reading record $i...")
val readData: String = storage.get(storageKey)!!
Assert.assertEquals("Object is not the same", addRecord, readData)
}
StorageSystem.close(storage)
storage = StorageSystem.Disk().file(TEST_DB).build()
val dataCheck = createData(total - 1)
log("reading record " + (total - 1) + "...")
val readData: String = storage.get(storageKey)!!
// the ONLY entry in storage should be the last one that we added
Assert.assertEquals("Object is not the same", dataCheck, readData)
val numberOfRecords1 = storage.size()
val size1: Long = storage.getFileSize()
Assert.assertEquals("count is not correct", numberOfRecords1.toLong(), 1)
Assert.assertEquals("size is not correct", size1, initialSize + sizePerRecord)
StorageSystem.close(storage)
} catch (e: Exception) {
e.printStackTrace()
Assert.fail("Error!")
}
}
@Test
fun testAddRecords_DelaySaveA() {
try {
var storage: Storage = StorageSystem.Disk().file(TEST_DB).build()
for (i in 0 until total) {
add(storage, i)
}
synchronized(Thread.currentThread()) {
Thread.currentThread().wait(storage.getSaveDelay() + 1000L)
}
for (i in 0 until total) {
val record1Data = createData(i)
val readRecord = readRecord(storage, i)
Assert.assertEquals("Object is not the same", record1Data, readRecord)
}
StorageSystem.close(storage)
storage = StorageSystem.Disk().file(TEST_DB).build()
for (i in 0 until total) {
val dataCheck = createData(i)
val readRecord = readRecord(storage, i)
Assert.assertEquals("Object is not the same", dataCheck, readRecord)
}
StorageSystem.close(storage)
} catch (e: Exception) {
e.printStackTrace()
Assert.fail("Error!")
}
}
@Test
fun testAddRecords_DelaySaveB() {
try {
var storage: Storage = StorageSystem.Disk().file(TEST_DB).build()
for (i in 0 until total) {
add(storage, i)
}
for (i in 0 until total) {
val record1Data = createData(i)
val readRecord = readRecord(storage, i)
Assert.assertEquals("Object is not the same", record1Data, readRecord)
}
StorageSystem.close(storage)
storage = StorageSystem.Disk().file(TEST_DB).build()
for (i in 0 until total) {
val dataCheck = createData(i)
val readRecord = readRecord(storage, i)
Assert.assertEquals("Object is not the same", dataCheck, readRecord)
}
StorageSystem.close(storage)
} catch (e: Exception) {
e.printStackTrace()
Assert.fail("Error!")
}
}
@Test
fun testLoadRecords() {
try {
var storage: Storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
for (i in 0 until total) {
val addRecord = add(storage, i)
val readRecord = readRecord(storage, i)
Assert.assertEquals("Object is not the same", addRecord, readRecord)
}
StorageSystem.close(storage)
storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
for (i in 0 until total) {
val dataCheck = createData(i)
val readRecord = readRecord(storage, i)
Assert.assertEquals("Object is not the same", dataCheck, readRecord)
}
// now test loading data
val data = Data()
val createKey: StorageKey = createKey(63)
makeData(data)
storage.put(createKey, data)
var data2: Data
data2 = storage.get(createKey, Data())
Assert.assertEquals("Object is not the same", data, data2)
StorageSystem.close(storage)
storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
data2 = storage.get(createKey, Data())
Assert.assertEquals("Object is not the same", data, data2)
StorageSystem.close(storage)
} catch (e: Exception) {
e.printStackTrace()
Assert.fail("Error!")
}
}
@Test
@Throws(IOException::class)
fun testAddRecordsDelete1Record() {
if (total < 4) {
throw IOException("Unable to run test with too few entries.")
}
try {
var storage: Storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
for (i in 0 until total) {
val addRecord = add(storage, i)
val readRecord = readRecord(storage, i)
Assert.assertEquals("Object is not the same", addRecord, readRecord)
}
StorageSystem.close(storage)
storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
for (i in 0 until total) {
val dataCheck = createData(i)
val readRecord = readRecord(storage, i)
Assert.assertEquals("Object is not the same", dataCheck, readRecord)
}
// make sure now that we can delete one of the records.
deleteRecord(storage, 3)
var readRecord = readRecord(storage, 9)
var dataCheck = createData(9)
Assert.assertEquals("Object is not the same", dataCheck, readRecord)
if (storage.contains(createKey(3))) {
Assert.fail("record NOT successfully deleted.")
}
// now we add 3 back
val addRecord = add(storage, 3)
dataCheck = createData(3)
Assert.assertEquals("Object is not the same", dataCheck, addRecord)
StorageSystem.close(storage)
storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
// check 9 again
readRecord = readRecord(storage, 9)
dataCheck = createData(9)
Assert.assertEquals("Object is not the same", dataCheck, readRecord)
// check 3 again
readRecord = readRecord(storage, 3)
dataCheck = createData(3)
Assert.assertEquals("Object is not the same", dataCheck, readRecord)
} catch (e: Exception) {
e.printStackTrace()
Assert.fail("Error!")
}
}
@Test
fun testUpdateRecords() {
try {
var storage: Storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
for (i in 0 until total) {
val addRecord = add(storage, i)
val readRecord = readRecord(storage, i)
Assert.assertEquals("Object is not the same", addRecord, readRecord)
}
StorageSystem.close(storage)
storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
var updateRecord = updateRecord(storage, 3, createData(3) + "new")
var readRecord = readRecord(storage, 3)
Assert.assertEquals("Object is not the same", updateRecord, readRecord)
StorageSystem.close(storage)
storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
readRecord = readRecord(storage, 3)
Assert.assertEquals("Object is not the same", updateRecord, readRecord)
updateRecord = updateRecord(storage, 3, createData(3))
StorageSystem.close(storage)
storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
readRecord = readRecord(storage, 3)
Assert.assertEquals("Object is not the same", updateRecord, readRecord)
StorageSystem.close(storage)
storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
updateRecord = updateRecord(storage, 0, createData(0) + "new")
readRecord = readRecord(storage, 0)
Assert.assertEquals("Object is not the same", updateRecord, readRecord)
StorageSystem.close(storage)
} catch (e: Exception) {
e.printStackTrace()
Assert.fail("Error!")
}
}
@Test
fun testSaveAllRecords() {
try {
var storage: Storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
for (i in 0 until total) {
val data = Data()
makeData(data)
val createKey: StorageKey = createKey(i)
storage.put(createKey, data)
}
StorageSystem.close(storage)
val data = Data()
makeData(data)
storage = StorageSystem.Disk().file(TEST_DB).register(Data::class.java).build()
for (i in 0 until total) {
val createKey: StorageKey = createKey(i)
var data2: Data
data2 = storage.get(createKey, Data())
Assert.assertEquals("Object is not the same", data, data2)
}
StorageSystem.close(storage)
} catch (e: Exception) {
e.printStackTrace()
Assert.fail("Error!")
}
}
class Data {
var string: String? = null
lateinit var strings: Array<String?>
lateinit var ints: IntArray
lateinit var shorts: ShortArray
lateinit var floats: FloatArray
lateinit var doubles: DoubleArray
lateinit var longs: LongArray
lateinit var bytes: ByteArray
lateinit var chars: CharArray
lateinit var booleans: BooleanArray
lateinit var Ints: Array<Int>
lateinit var Shorts: Array<Short>
lateinit var Floats: Array<Float>
lateinit var Doubles: Array<Double>
lateinit var Longs: Array<Long>
lateinit var Bytes: Array<Byte>
lateinit var Chars: Array<Char>
lateinit var Booleans: Array<Boolean>
override fun hashCode(): Int {
val prime = 31
var result = 1
result = prime * result + Arrays.hashCode(Booleans)
result = prime * result + Arrays.hashCode(Bytes)
result = prime * result + Arrays.hashCode(Chars)
result = prime * result + Arrays.hashCode(Doubles)
result = prime * result + Arrays.hashCode(Floats)
result = prime * result + Arrays.hashCode(Ints)
result = prime * result + Arrays.hashCode(Longs)
result = prime * result + Arrays.hashCode(Shorts)
result = prime * result + Arrays.hashCode(booleans)
result = prime * result + Arrays.hashCode(bytes)
result = prime * result + Arrays.hashCode(chars)
result = prime * result + Arrays.hashCode(doubles)
result = prime * result + Arrays.hashCode(floats)
result = prime * result + Arrays.hashCode(ints)
result = prime * result + Arrays.hashCode(longs)
result = prime * result + Arrays.hashCode(shorts)
result = prime * result + if (string == null) 0 else string.hashCode()
result = prime * result + Arrays.hashCode(strings)
return result
}
override fun equals(obj: Any?): Boolean {
if (this === obj) {
return true
}
if (obj == null) {
return false
}
if (javaClass != obj.javaClass) {
return false
}
val other = obj as Data
if (!Arrays.equals(Booleans, other.Booleans)) {
return false
}
if (!Arrays.equals(Bytes, other.Bytes)) {
return false
}
if (!Arrays.equals(Chars, other.Chars)) {
return false
}
if (!Arrays.equals(Doubles, other.Doubles)) {
return false
}
if (!Arrays.equals(Floats, other.Floats)) {
return false
}
if (!Arrays.equals(Ints, other.Ints)) {
return false
}
if (!Arrays.equals(Longs, other.Longs)) {
return false
}
if (!Arrays.equals(Shorts, other.Shorts)) {
return false
}
if (!Arrays.equals(booleans, other.booleans)) {
return false
}
if (!Arrays.equals(bytes, other.bytes)) {
return false
}
if (!Arrays.equals(chars, other.chars)) {
return false
}
if (!Arrays.equals(doubles, other.doubles)) {
return false
}
if (!Arrays.equals(floats, other.floats)) {
return false
}
if (!Arrays.equals(ints, other.ints)) {
return false
}
if (!Arrays.equals(longs, other.longs)) {
return false
}
if (!Arrays.equals(shorts, other.shorts)) {
return false
}
if (string == null) {
if (other.string != null) {
return false
}
} else if (string != other.string) {
return false
}
return Arrays.equals(strings, other.strings)
}
override fun toString(): String {
return "Data"
}
}
companion object {
var total = 10
// the initial size is specified during disk.storage construction, and is based on the number of padded records.
private const val initialSize = 1024L
// this is the size for each record (determined by looking at the output when writing the file)
private const val sizePerRecord = 23
private val TEST_DB = File("sampleFile.records")
fun log(s: String?) {
System.err.println(s)
}
private fun createData(number: Int): String {
return "$number data for record # $number"
}
fun add(storage: Storage, number: Int): String {
val record1Data = createData(number)
val record1Key: StorageKey = createKey(number)
log("adding record $number...")
storage.put(record1Key, record1Data)
return record1Data
}
fun readRecord(storage: Storage, number: Int): String {
val record1Key: StorageKey = createKey(number)
log("reading record $number...")
val readData: String = storage.get(record1Key)!!
log("\trecord $number data: '$readData'")
return readData
}
fun deleteRecord(storage: Storage, nNumber: Int) {
val record1Key: StorageKey = createKey(nNumber)
log("deleting record $nNumber...")
storage.delete(record1Key)
}
private fun updateRecord(storage: Storage, number: Int, newData: String): String {
val record1Key: StorageKey = createKey(number)
log("updating record $number...")
storage.put(record1Key, newData)
return newData
}
private fun createKey(number: Int): StorageKey {
return StorageKey("foo$number")
}
// from kryo unit test.
private fun makeData(data: Data) {
val buffer = StringBuilder(128)
for (i in 0..2) {
buffer.append('a')
}
data.string = buffer.toString()
data.strings = arrayOf("ab012", "", null, "!@#$", "\uFFFD\uFFFD\uFFFD\uFFFD\uFFFD") // non-ASCII sample garbled to replacement characters in this source
data.ints = intArrayOf(-1234567, 1234567, -1, 0, 1, Int.MAX_VALUE, Int.MIN_VALUE)
data.shorts = shortArrayOf(
(-12345).toShort(),
12345.toShort(),
(-1).toShort(),
0.toShort(),
1.toShort(),
Short.MAX_VALUE,
Short.MIN_VALUE
)
data.floats =
floatArrayOf(0f, -0f, 1f, -1f, 123456f, -123456f, 0.1f, 0.2f, -0.3f, Math.PI.toFloat(), Float.MAX_VALUE, Float.MIN_VALUE)
data.doubles =
doubleArrayOf(0.0, -0.0, 1.0, -1.0, 123456.0, -123456.0, 0.1, 0.2, -0.3, Math.PI, Double.MAX_VALUE, Double.MIN_VALUE)
data.longs = longArrayOf(0, -0, 1, -1, 123456, -123456, 99999999999L, -99999999999L, Long.MAX_VALUE, Long.MIN_VALUE)
data.bytes = byteArrayOf((-123).toByte(), 123.toByte(), (-1).toByte(), 0.toByte(), 1.toByte(), Byte.MAX_VALUE, Byte.MIN_VALUE)
data.chars =
charArrayOf(32345.toChar(), 12345.toChar(), 0.toChar(), 1.toChar(), 63.toChar(), Character.MAX_VALUE, Character.MIN_VALUE)
data.booleans = booleanArrayOf(true, false)
data.Ints = arrayOf(-1234567, 1234567, -1, 0, 1, Int.MAX_VALUE, Int.MIN_VALUE)
data.Shorts = arrayOf(-12345, 12345, -1, 0, 1, Short.MAX_VALUE, Short.MIN_VALUE)
data.Floats = arrayOf(
0.0f,
-0.0f,
1.0f,
-1.0f,
123456.0f,
-123456.0f,
0.1f,
0.2f,
-0.3f,
Math.PI.toFloat(),
Float.MAX_VALUE,
Float.MIN_VALUE
)
data.Doubles = arrayOf(0.0, -0.0, 1.0, -1.0, 123456.0, -123456.0, 0.1, 0.2, -0.3, Math.PI, Double.MAX_VALUE, Double.MIN_VALUE)
data.Longs = arrayOf(0L, -0L, 1L, -1L, 123456L, -123456L, 99999999999L, -99999999999L, Long.MAX_VALUE, Long.MIN_VALUE)
data.Bytes = arrayOf(-123, 123, -1, 0, 1, Byte.MAX_VALUE, Byte.MIN_VALUE)
data.Chars = arrayOf(32345, 12345, 0, 1, 63, Character.MAX_VALUE, Character.MIN_VALUE)
data.Booleans = arrayOf(true, false)
}
}
}