From 0401a6a1648b8a9d4ef218bbfdaab31fa7a51c89 Mon Sep 17 00:00:00 2001 From: nathan Date: Tue, 19 Jan 2016 00:12:10 +0100 Subject: [PATCH] WIP - single writer principle. Disruptor added, WIP optimizing data structures for hit on writer, instead of reader --- src/dorkbox/util/messagebus/MessageBus.java | 27 +- .../util/messagebus/common/HashMapTree.java | 7 +- .../common/StrongConcurrentSetV8.java | 6 +- .../common/adapter/ConcurrentHashMapV8.java | 4362 ----------------- .../common/adapter/Java6Adapter.java | 3 +- .../common/adapter/StampedLock.java | 1549 ------ .../common/thread/ConcurrentSet.java | 5 +- .../PublisherExactWithSuperTypes.java | 1 + .../subscription/SubscriptionFactory.java | 20 + .../subscription/SubscriptionHandler.java | 49 + .../subscription/SubscriptionHolder.java | 13 + .../subscription/SubscriptionManager.java | 83 +- .../subscription/WriterDistruptor.java | 159 + .../messagebus/synchrony/AsyncDisruptor.java | 3 +- .../util/messagebus/utils/ClassUtils.java | 39 +- .../messagebus/utils/SubscriptionUtils.java | 12 +- .../util/messagebus/utils/VarArgUtils.java | 8 +- 17 files changed, 336 insertions(+), 6010 deletions(-) delete mode 100644 src/dorkbox/util/messagebus/common/adapter/ConcurrentHashMapV8.java delete mode 100644 src/dorkbox/util/messagebus/common/adapter/StampedLock.java create mode 100644 src/dorkbox/util/messagebus/subscription/SubscriptionFactory.java create mode 100644 src/dorkbox/util/messagebus/subscription/SubscriptionHandler.java create mode 100644 src/dorkbox/util/messagebus/subscription/SubscriptionHolder.java create mode 100644 src/dorkbox/util/messagebus/subscription/WriterDistruptor.java diff --git a/src/dorkbox/util/messagebus/MessageBus.java b/src/dorkbox/util/messagebus/MessageBus.java index 3026a93..7101801 100644 --- a/src/dorkbox/util/messagebus/MessageBus.java +++ b/src/dorkbox/util/messagebus/MessageBus.java @@ -17,14 +17,15 @@ package dorkbox.util.messagebus; import dorkbox.util.messagebus.error.DefaultErrorHandler; import dorkbox.util.messagebus.error.ErrorHandlingSupport; -import dorkbox.util.messagebus.synchrony.AsyncDisruptor; -import dorkbox.util.messagebus.synchrony.Sync; -import dorkbox.util.messagebus.synchrony.Synchrony; import dorkbox.util.messagebus.publication.Publisher; import dorkbox.util.messagebus.publication.PublisherExact; import dorkbox.util.messagebus.publication.PublisherExactWithSuperTypes; import dorkbox.util.messagebus.publication.PublisherExactWithSuperTypesAndVarity; import dorkbox.util.messagebus.subscription.SubscriptionManager; +import dorkbox.util.messagebus.subscription.WriterDistruptor; +import dorkbox.util.messagebus.synchrony.AsyncDisruptor; +import dorkbox.util.messagebus.synchrony.Sync; +import dorkbox.util.messagebus.synchrony.Synchrony; /** * The base class for all message bus implementations with support for asynchronous message dispatch. 
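The commit subject refers to the single writer principle: route every subscribe/unsubscribe request through a Disruptor ring buffer so that exactly one thread ever mutates the subscription data structures, which keeps the read side lock-free. A minimal, hypothetical sketch of that pattern, assuming the LMAX Disruptor 3.x API; SubscriptionEvent, SingleWriterSketch and the handler body are illustrative stand-ins, not the WriterDistruptor class this patch adds:

import java.util.concurrent.ThreadFactory;

import com.lmax.disruptor.EventHandler;
import com.lmax.disruptor.RingBuffer;
import com.lmax.disruptor.dsl.Disruptor;

// Hypothetical event type carrying one subscription change request.
class SubscriptionEvent {
    Object listener;
    boolean subscribe;
}

public class SingleWriterSketch {
    public static void main(String[] args) {
        // One consumer thread owns every write to the subscription structures,
        // so those structures need no write-side locking at all.
        ThreadFactory threadFactory = Thread::new;
        Disruptor<SubscriptionEvent> disruptor =
                new Disruptor<>(SubscriptionEvent::new, 1024, threadFactory);

        disruptor.handleEventsWith((EventHandler<SubscriptionEvent>) (event, sequence, endOfBatch) -> {
            // Apply the change here, on the single writer thread.
            System.out.println((event.subscribe ? "subscribe " : "unsubscribe ") + event.listener);
        });

        RingBuffer<SubscriptionEvent> ringBuffer = disruptor.start();

        // Any caller thread only publishes a request; it never touches the maps directly.
        long seq = ringBuffer.next();
        try {
            SubscriptionEvent event = ringBuffer.get(seq);
            event.listener = new Object();
            event.subscribe = true;
        } finally {
            ringBuffer.publish(seq);
        }

        disruptor.shutdown(); // drains published events before halting the handler thread
    }
}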
@@ -39,6 +40,8 @@ public class MessageBus implements IMessageBus { private final ErrorHandlingSupport errorHandler; + private final WriterDistruptor subscriptionWriter; + private final SubscriptionManager subscriptionManager; private final Publisher publisher; @@ -90,6 +93,9 @@ class MessageBus implements IMessageBus { */ this.subscriptionManager = new SubscriptionManager(numberOfThreads, errorHandler); + subscriptionWriter = new WriterDistruptor(errorHandler, subscriptionManager); + + switch (publishMode) { case Exact: publisher = new PublisherExact(errorHandler, subscriptionManager); @@ -123,13 +129,23 @@ class MessageBus implements IMessageBus { @Override public void subscribe(final Object listener) { - MessageBus.this.subscriptionManager.subscribe(listener); + if (listener == null) { + return; + } + + subscriptionManager.subscribe(listener); +// subscriptionWriter.subscribe(listener); } @Override public void unsubscribe(final Object listener) { - MessageBus.this.subscriptionManager.unsubscribe(listener); + if (listener == null) { + return; + } + + subscriptionManager.unsubscribe(listener); +// subscriptionWriter.unsubscribe(listener); } @Override @@ -232,6 +248,7 @@ class MessageBus implements IMessageBus { @Override public void shutdown() { + this.subscriptionWriter.shutdown(); this.asyncPublication.shutdown(); this.subscriptionManager.shutdown(); } diff --git a/src/dorkbox/util/messagebus/common/HashMapTree.java b/src/dorkbox/util/messagebus/common/HashMapTree.java index 8414f2c..af02adc 100644 --- a/src/dorkbox/util/messagebus/common/HashMapTree.java +++ b/src/dorkbox/util/messagebus/common/HashMapTree.java @@ -15,8 +15,7 @@ */ package dorkbox.util.messagebus.common; -import dorkbox.util.messagebus.common.adapter.JavaVersionAdapter; - +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicReference; @@ -37,7 +36,7 @@ public class HashMapTree { @Override protected Object initialValue() { - return JavaVersionAdapter.concurrentMap(INITIAL_SIZE, LOAD_FACTOR, 1); + return new ConcurrentHashMap(INITIAL_SIZE, LOAD_FACTOR, 1); } }; @@ -197,7 +196,7 @@ public class HashMapTree { if (checked == null) { final boolean success = children.compareAndSet(null, cached); if (success) { - keyCache.set(JavaVersionAdapter.concurrentMap(INITIAL_SIZE, LOAD_FACTOR, 1)); + keyCache.set(new ConcurrentHashMap(INITIAL_SIZE, LOAD_FACTOR, 1)); kids = cast(cached); } else { diff --git a/src/dorkbox/util/messagebus/common/StrongConcurrentSetV8.java b/src/dorkbox/util/messagebus/common/StrongConcurrentSetV8.java index 2eb4df1..8ad70ec 100644 --- a/src/dorkbox/util/messagebus/common/StrongConcurrentSetV8.java +++ b/src/dorkbox/util/messagebus/common/StrongConcurrentSetV8.java @@ -15,7 +15,7 @@ */ package dorkbox.util.messagebus.common; -import dorkbox.util.messagebus.common.adapter.JavaVersionAdapter; +import java.util.concurrent.ConcurrentHashMap; /** * This implementation uses strong references to the elements, uses an IdentityHashMap @@ -28,11 +28,11 @@ public class StrongConcurrentSetV8 extends StrongConcurrentSet { public StrongConcurrentSetV8(int size, float loadFactor) { // 1 for the stripe size, because that is the max concurrency with our concurrent set (since it uses R/W locks) - super(JavaVersionAdapter.>concurrentMap(size, loadFactor, 16)); + super(new ConcurrentHashMap>(size, loadFactor, 16)); } public StrongConcurrentSetV8(int size, float loadFactor, int stripeSize) { // 1 for the stripe size, because that is the max 
concurrency with our concurrent set (since it uses R/W locks) - super(JavaVersionAdapter.>concurrentMap(size, loadFactor, stripeSize)); + super(new ConcurrentHashMap>(size, loadFactor, stripeSize)); } } diff --git a/src/dorkbox/util/messagebus/common/adapter/ConcurrentHashMapV8.java b/src/dorkbox/util/messagebus/common/adapter/ConcurrentHashMapV8.java deleted file mode 100644 index 210f718..0000000 --- a/src/dorkbox/util/messagebus/common/adapter/ConcurrentHashMapV8.java +++ /dev/null @@ -1,4362 +0,0 @@ -/* - * Written by Doug Lea with assistance from members of JCP JSR-166 - * Expert Group and released to the public domain, as explained at - * http://creativecommons.org/publicdomain/zero/1.0/ - * - * Copyright 2015 dorkbox, llc - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package dorkbox.util.messagebus.common.adapter; - -import java.io.ObjectStreamField; -import java.io.Serializable; -import java.lang.reflect.ParameterizedType; -import java.lang.reflect.Type; -import java.util.AbstractMap; -import java.util.Arrays; -import java.util.Collection; -import java.util.ConcurrentModificationException; -import java.util.Enumeration; -import java.util.HashMap; -import java.util.Hashtable; -import java.util.Iterator; -import java.util.Map; -import java.util.NoSuchElementException; -import java.util.Set; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.LockSupport; -import java.util.concurrent.locks.ReentrantLock; - -/* - * Bulk operations removed in Java 6 backport. - */ - -/** - * A hash table supporting full concurrency of retrievals and - * high expected concurrency for updates. This class obeys the - * same functional specification as {@link java.util.Hashtable}, and - * includes versions of methods corresponding to each method of - * {@code Hashtable}. However, even though all operations are - * thread-safe, retrieval operations do not entail locking, - * and there is not any support for locking the entire table - * in a way that prevents all access. This class is fully - * interoperable with {@code Hashtable} in programs that rely on its - * thread safety but not on its synchronization details. - * - *

Retrieval operations (including {@code get}) generally do not - * block, so may overlap with update operations (including {@code put} - * and {@code remove}). Retrievals reflect the results of the most - * recently completed update operations holding upon their - * onset. (More formally, an update operation for a given key bears a - * happens-before relation with any (non-null) retrieval for - * that key reporting the updated value.) For aggregate operations - * such as {@code putAll} and {@code clear}, concurrent retrievals may - * reflect insertion or removal of only some entries. Similarly, - * Iterators and Enumerations return elements reflecting the state of - * the hash table at some point at or since the creation of the - * iterator/enumeration. They do not throw {@link - * ConcurrentModificationException}. However, iterators are designed - * to be used by only one thread at a time. Bear in mind that the - * results of aggregate status methods including {@code size}, {@code - * isEmpty}, and {@code containsValue} are typically useful only when - * a map is not undergoing concurrent updates in other threads. - * Otherwise the results of these methods reflect transient states - * that may be adequate for monitoring or estimation purposes, but not - * for program control. - * - *
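For the retrieval guarantee described above, a small illustrative sketch against the standard java.util.concurrent.ConcurrentHashMap (which this backport mirrors): once the reader observes a non-null value, the writer's put happens-before that get, so the stored payload is fully visible. Class and key names are illustrative only.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class HappensBeforeSketch {
    public static void main(String[] args) throws InterruptedException {
        ConcurrentMap<String, int[]> map = new ConcurrentHashMap<>();

        Thread writer = new Thread(() -> {
            int[] payload = {42};          // fully built before publication
            map.put("key", payload);       // publication point
        });

        Thread reader = new Thread(() -> {
            int[] seen;
            while ((seen = map.get("key")) == null) {
                Thread.yield();            // retrieval never blocks; just retry
            }
            // A non-null result bears a happens-before edge with the put,
            // so the payload is guaranteed to be visible as written.
            System.out.println(seen[0]);   // always prints 42
        });

        reader.start();
        writer.start();
        writer.join();
        reader.join();
    }
}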

The table is dynamically expanded when there are too many - * collisions (i.e., keys that have distinct hash codes but fall into - * the same slot modulo the table size), with the expected average - * effect of maintaining roughly two bins per mapping (corresponding - * to a 0.75 load factor threshold for resizing). There may be much - * variance around this average as mappings are added and removed, but - * overall, this maintains a commonly accepted time/space tradeoff for - * hash tables. However, resizing this or any other kind of hash - * table may be a relatively slow operation. When possible, it is a - * good idea to provide a size estimate as an optional {@code - * initialCapacity} constructor argument. An additional optional - * {@code loadFactor} constructor argument provides a further means of - * customizing initial table capacity by specifying the table density - * to be used in calculating the amount of space to allocate for the - * given number of elements. Also, for compatibility with previous - * versions of this class, constructors may optionally specify an - * expected {@code concurrencyLevel} as an additional hint for - * internal sizing. Note that using many keys with exactly the same - * {@code hashCode()} is a sure way to slow down performance of any - * hash table. To ameliorate impact, when keys are {@link Comparable}, - * this class may use comparison order among keys to help break ties. - * - *
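The sizing hints described above are the same ones the patch now passes directly to java.util.concurrent.ConcurrentHashMap (for example new ConcurrentHashMap(INITIAL_SIZE, LOAD_FACTOR, 1)). A short sketch with illustrative values:

import java.util.concurrent.ConcurrentHashMap;

public class SizingHintSketch {
    public static void main(String[] args) {
        // Size estimate only: avoids early resizes while the map fills up.
        ConcurrentHashMap<String, Integer> byCapacity = new ConcurrentHashMap<>(256);

        // Capacity plus density: the load factor is applied once, to the initial table size.
        ConcurrentHashMap<String, Integer> byDensity = new ConcurrentHashMap<>(256, 0.75f);

        // Legacy three-argument form: concurrencyLevel is only a sizing hint here.
        ConcurrentHashMap<String, Integer> legacyHint = new ConcurrentHashMap<>(256, 0.75f, 1);

        byCapacity.put("a", 1);
        byDensity.put("b", 2);
        legacyHint.put("c", 3);
        System.out.println(byCapacity.size() + byDensity.size() + legacyHint.size()); // 3
    }
}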

A {@link Set} projection of a ConcurrentHashMapV8 may be created - * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed - * (using {@link #keySet(Object)} when only keys are of interest, and the - * mapped values are (perhaps transiently) not used or all take the - * same mapping value. - * - *
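A brief sketch of the Set projections described above, using the equivalent JDK 8 ConcurrentHashMap methods newKeySet() and keySet(mappedValue); keys and values here are illustrative:

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class KeySetViewSketch {
    public static void main(String[] args) {
        // A concurrent Set backed by a ConcurrentHashMap with a dummy Boolean value.
        Set<String> concurrentSet = ConcurrentHashMap.newKeySet();
        concurrentSet.add("alpha");

        // A keyed view over an existing map; add() inserts the key with the given default value.
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        ConcurrentHashMap.KeySetView<String, Integer> keys = map.keySet(1);
        keys.add("beta");                                     // like map.putIfAbsent("beta", 1)

        System.out.println(map.get("beta"));                  // 1
        System.out.println(concurrentSet.contains("alpha"));  // true
    }
}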

This class and its views and iterators implement all of the - * optional methods of the {@link Map} and {@link Iterator} - * interfaces. - * - *

Like {@link Hashtable} but unlike {@link HashMap}, this class - * does not allow {@code null} to be used as a key or value. - * - *

ConcurrentHashMapV8s support a set of sequential and parallel bulk - * operations that are designed - * to be safely, and often sensibly, applied even with maps that are - * being concurrently updated by other threads; for example, when - * computing a snapshot summary of the values in a shared registry. - * There are three kinds of operation, each with four forms, accepting - * functions with Keys, Values, Entries, and (Key, Value) arguments - * and/or return values. Because the elements of a ConcurrentHashMapV8 - * are not ordered in any particular way, and may be processed in - * different orders in different parallel executions, the correctness - * of supplied functions should not depend on any ordering, or on any - * other objects or values that may transiently change while - * computation is in progress; and except for forEach actions, should - * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry} - * objects do not support method {@code setValue}. - * - *

- * <ul>
- * <li> forEach: Perform a given action on each element.
- * A variant form applies a given transformation on each element
- * before performing the action.</li>
- *
- * <li> search: Return the first available non-null result of
- * applying a given function on each element; skipping further
- * search when a result is found.</li>
- *
- * <li> reduce: Accumulate each element.  The supplied reduction
- * function cannot rely on ordering (more formally, it should be
- * both associative and commutative).  There are five variants:
- *
- * <ul>
- *
- * <li> Plain reductions. (There is not a form of this method for
- * (key, value) function arguments since there is no corresponding
- * return type.)</li>
- *
- * <li> Mapped reductions that accumulate the results of a given
- * function applied to each element.</li>
- *
- * <li> Reductions to scalar doubles, longs, and ints, using a
- * given basis value.</li>
- *
- * </ul>
- * </li>
- * </ul>

These bulk operations accept a {@code parallelismThreshold} - * argument. Methods proceed sequentially if the current map size is - * estimated to be less than the given threshold. Using a value of - * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value - * of {@code 1} results in maximal parallelism by partitioning into - * enough subtasks to fully utilize the {@link - * ForkJoinPool#commonPool()} that is used for all parallel - * computations. Normally, you would initially choose one of these - * extreme values, and then measure performance of using in-between - * values that trade off overhead versus throughput. - * - *
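A short sketch of the threshold argument described above, written against the standard JDK 8 ConcurrentHashMap whose bulk operations this backport mirrors (the jsr166e variants take the same threshold in the same position); the map contents are illustrative:

import java.util.concurrent.ConcurrentHashMap;

public class ThresholdSketch {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Long> hits = new ConcurrentHashMap<>();
        hits.put("index", 10L);
        hits.put("about", 3L);
        hits.put("search", 7L);

        // Long.MAX_VALUE suppresses parallelism: the traversal runs in the caller thread.
        hits.forEach(Long.MAX_VALUE, (page, count) -> System.out.println(page + "=" + count));

        // A threshold of 1 asks for maximal parallelism via the common ForkJoinPool.
        String found = hits.search(1, (page, count) -> count > 5 ? page : null);
        System.out.println(found); // "index" or "search", whichever is found first
    }
}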

The concurrency properties of bulk operations follow - * from those of ConcurrentHashMapV8: Any non-null result returned - * from {@code get(key)} and related access methods bears a - * happens-before relation with the associated insertion or - * update. The result of any bulk operation reflects the - * composition of these per-element relations (but is not - * necessarily atomic with respect to the map as a whole unless it - * is somehow known to be quiescent). Conversely, because keys - * and values in the map are never null, null serves as a reliable - * atomic indicator of the current lack of any result. To - * maintain this property, null serves as an implicit basis for - * all non-scalar reduction operations. For the double, long, and - * int versions, the basis should be one that, when combined with - * any other value, returns that other value (more formally, it - * should be the identity element for the reduction). Most common - * reductions have these properties; for example, computing a sum - * with basis 0 or a minimum with basis MAX_VALUE. - * - *
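To make the identity-basis rule above concrete, a hedged sketch of the scalar reductions using the JDK 8 equivalents, with basis 0 for a sum and MAX_VALUE for a minimum; the data is illustrative:

import java.util.concurrent.ConcurrentHashMap;

public class ReductionBasisSketch {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> scores = new ConcurrentHashMap<>();
        scores.put("a", 4);
        scores.put("b", 9);
        scores.put("c", 2);

        // Sum: 0 is the identity for +, so an empty map reduces to 0.
        long sum = scores.reduceValuesToLong(1, Integer::longValue, 0L, Long::sum);

        // Minimum: MAX_VALUE is the identity for min, so it never distorts the result.
        long min = scores.reduceValuesToLong(1, Integer::longValue, Long.MAX_VALUE, Math::min);

        System.out.println(sum + " " + min); // 15 2
    }
}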

Search and transformation functions provided as arguments - * should similarly return null to indicate the lack of any result - * (in which case it is not used). In the case of mapped - * reductions, this also enables transformations to serve as - * filters, returning null (or, in the case of primitive - * specializations, the identity basis) if the element should not - * be combined. You can create compound transformations and - * filterings by composing them yourself under this "null means - * there is nothing there now" rule before using them in search or - * reduce operations. - * - *

Methods accepting and/or returning Entry arguments maintain - * key-value associations. They may be useful for example when - * finding the key for the greatest value. Note that "plain" Entry - * arguments can be supplied using {@code new - * AbstractMap.SimpleEntry(k,v)}. - * - *
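As a concrete instance of the Entry-based forms mentioned above, a small sketch (JDK 8 API, illustrative data) that finds the key mapping to the greatest value:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class MaxEntrySketch {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> wordCounts = new ConcurrentHashMap<>();
        wordCounts.put("the", 112);
        wordCounts.put("disruptor", 7);
        wordCounts.put("writer", 31);

        // Reduce over entries, keeping whichever entry carries the larger value.
        Map.Entry<String, Integer> max = wordCounts.reduceEntries(
                Long.MAX_VALUE,                                   // sequential is fine for a tiny map
                (a, b) -> a.getValue() >= b.getValue() ? a : b);

        System.out.println(max.getKey() + " -> " + max.getValue()); // the -> 112
    }
}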

Bulk operations may complete abruptly, throwing an - * exception encountered in the application of a supplied - * function. Bear in mind when handling such exceptions that other - * concurrently executing functions could also have thrown - * exceptions, or would have done so if the first exception had - * not occurred. - * - *

Speedups for parallel compared to sequential forms are common - * but not guaranteed. Parallel operations involving brief functions - * on small maps may execute more slowly than sequential forms if the - * underlying work to parallelize the computation is more expensive - * than the computation itself. Similarly, parallelization may not - * lead to much actual parallelism if all processors are busy - * performing unrelated tasks. - * - *

All arguments to all task methods must be non-null. - * - *

jsr166e note: During transition, this class - * uses nested functional interfaces with different names but the - * same forms as those expected for JDK8. - * - *

This class is a member of the - * - * Java Collections Framework. - * - * @since 1.5 - * @author Doug Lea - * @param the type of keys maintained by this map - * @param the type of mapped values - */ -@SuppressWarnings("all") -public class ConcurrentHashMapV8 extends AbstractMap - implements ConcurrentMap, Serializable { - private static final long serialVersionUID = 7249069246763182397L; - - /** - * An object for traversing and partitioning elements of a source. - * This interface provides a subset of the functionality of JDK8 - * java.util.Spliterator. - */ - public static interface ConcurrentHashMapSpliterator { - /** - * If possible, returns a new spliterator covering - * approximately one half of the elements, which will not be - * covered by this spliterator. Returns null if cannot be - * split. - */ - ConcurrentHashMapSpliterator trySplit(); - /** - * Returns an estimate of the number of elements covered by - * this Spliterator. - */ - long estimateSize(); - - /** Applies the action to each untraversed element */ - void forEachRemaining(Action action); - /** If an element remains, applies the action and returns true. */ - boolean tryAdvance(Action action); - } - - // Sams - /** Interface describing a void action of one argument */ - public interface Action { void apply(A a); } - /** Interface describing a void action of two arguments */ - public interface BiAction { void apply(A a, B b); } - /** Interface describing a function of one argument */ - public interface Fun { T apply(A a); } - /** Interface describing a function of two arguments */ - public interface BiFun { T apply(A a, B b); } - /** Interface describing a function mapping its argument to a double */ - public interface ObjectToDouble { double apply(A a); } - /** Interface describing a function mapping its argument to a long */ - public interface ObjectToLong { long apply(A a); } - /** Interface describing a function mapping its argument to an int */ - public interface ObjectToInt {int apply(A a); } - /** Interface describing a function mapping two arguments to a double */ - public interface ObjectByObjectToDouble { double apply(A a, B b); } - /** Interface describing a function mapping two arguments to a long */ - public interface ObjectByObjectToLong { long apply(A a, B b); } - /** Interface describing a function mapping two arguments to an int */ - public interface ObjectByObjectToInt {int apply(A a, B b); } - /** Interface describing a function mapping two doubles to a double */ - public interface DoubleByDoubleToDouble { double apply(double a, double b); } - /** Interface describing a function mapping two longs to a long */ - public interface LongByLongToLong { long apply(long a, long b); } - /** Interface describing a function mapping two ints to an int */ - public interface IntByIntToInt { int apply(int a, int b); } - - /* - * Overview: - * - * The primary design goal of this hash table is to maintain - * concurrent readability (typically method get(), but also - * iterators and related methods) while minimizing update - * contention. Secondary goals are to keep space consumption about - * the same or better than java.util.HashMap, and to support high - * initial insertion rates on an empty table by many threads. - * - * This map usually acts as a binned (bucketed) hash table. Each - * key-value mapping is held in a Node. Most nodes are instances - * of the basic Node class with hash, key, value, and next - * fields. However, various subclasses exist: TreeNodes are - * arranged in balanced trees, not lists. 
TreeBins hold the roots - * of sets of TreeNodes. ForwardingNodes are placed at the heads - * of bins during resizing. ReservationNodes are used as - * placeholders while establishing values in computeIfAbsent and - * related methods. The types TreeBin, ForwardingNode, and - * ReservationNode do not hold normal user keys, values, or - * hashes, and are readily distinguishable during search etc - * because they have negative hash fields and null key and value - * fields. (These special nodes are either uncommon or transient, - * so the impact of carrying around some unused fields is - * insignificant.) - * - * The table is lazily initialized to a power-of-two size upon the - * first insertion. Each bin in the table normally contains a - * list of Nodes (most often, the list has only zero or one Node). - * Table accesses require volatile/atomic reads, writes, and - * CASes. Because there is no other way to arrange this without - * adding further indirections, we use intrinsics - * (sun.misc.Unsafe) operations. - * - * We use the top (sign) bit of Node hash fields for control - * purposes -- it is available anyway because of addressing - * constraints. Nodes with negative hash fields are specially - * handled or ignored in map methods. - * - * Insertion (via put or its variants) of the first node in an - * empty bin is performed by just CASing it to the bin. This is - * by far the most common case for put operations under most - * key/hash distributions. Other update operations (insert, - * delete, and replace) require locks. We do not want to waste - * the space required to associate a distinct lock object with - * each bin, so instead use the first node of a bin list itself as - * a lock. Locking support for these locks relies on builtin - * "synchronized" monitors. - * - * Using the first node of a list as a lock does not by itself - * suffice though: When a node is locked, any update must first - * validate that it is still the first node after locking it, and - * retry if not. Because new nodes are always appended to lists, - * once a node is first in a bin, it remains first until deleted - * or the bin becomes invalidated (upon resizing). - * - * The main disadvantage of per-bin locks is that other update - * operations on other nodes in a bin list protected by the same - * lock can stall, for example when user equals() or mapping - * functions take a long time. However, statistically, under - * random hash codes, this is not a common problem. Ideally, the - * frequency of nodes in bins follows a Poisson distribution - * (http://en.wikipedia.org/wiki/Poisson_distribution) with a - * parameter of about 0.5 on average, given the resizing threshold - * of 0.75, although with a large variance because of resizing - * granularity. Ignoring variance, the expected occurrences of - * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The - * first values are: - * - * 0: 0.60653066 - * 1: 0.30326533 - * 2: 0.07581633 - * 3: 0.01263606 - * 4: 0.00157952 - * 5: 0.00015795 - * 6: 0.00001316 - * 7: 0.00000094 - * 8: 0.00000006 - * more: less than 1 in ten million - * - * Lock contention probability for two threads accessing distinct - * elements is roughly 1 / (8 * #elements) under random hashes. - * - * Actual hash code distributions encountered in practice - * sometimes deviate significantly from uniform randomness. This - * includes the case when N > (1<<30), so some keys MUST collide. 
- * Similarly for dumb or hostile usages in which multiple keys are - * designed to have identical hash codes or ones that differs only - * in masked-out high bits. So we use a secondary strategy that - * applies when the number of nodes in a bin exceeds a - * threshold. These TreeBins use a balanced tree to hold nodes (a - * specialized form of red-black trees), bounding search time to - * O(log N). Each search step in a TreeBin is at least twice as - * slow as in a regular list, but given that N cannot exceed - * (1<<64) (before running out of addresses) this bounds search - * steps, lock hold times, etc, to reasonable constants (roughly - * 100 nodes inspected per operation worst case) so long as keys - * are Comparable (which is very common -- String, Long, etc). - * TreeBin nodes (TreeNodes) also maintain the same "next" - * traversal pointers as regular nodes, so can be traversed in - * iterators in the same way. - * - * The table is resized when occupancy exceeds a percentage - * threshold (nominally, 0.75, but see below). Any thread - * noticing an overfull bin may assist in resizing after the - * initiating thread allocates and sets up the replacement - * array. However, rather than stalling, these other threads may - * proceed with insertions etc. The use of TreeBins shields us - * from the worst case effects of overfilling while resizes are in - * progress. Resizing proceeds by transferring bins, one by one, - * from the table to the next table. To enable concurrency, the - * next table must be (incrementally) prefilled with place-holders - * serving as reverse forwarders to the old table. Because we are - * using power-of-two expansion, the elements from each bin must - * either stay at same index, or move with a power of two - * offset. We eliminate unnecessary node creation by catching - * cases where old nodes can be reused because their next fields - * won't change. On average, only about one-sixth of them need - * cloning when a table doubles. The nodes they replace will be - * garbage collectable as soon as they are no longer referenced by - * any reader thread that may be in the midst of concurrently - * traversing table. Upon transfer, the old table bin contains - * only a special forwarding node (with hash field "MOVED") that - * contains the next table as its key. On encountering a - * forwarding node, access and update operations restart, using - * the new table. - * - * Each bin transfer requires its bin lock, which can stall - * waiting for locks while resizing. However, because other - * threads can join in and help resize rather than contend for - * locks, average aggregate waits become shorter as resizing - * progresses. The transfer operation must also ensure that all - * accessible bins in both the old and new table are usable by any - * traversal. This is arranged by proceeding from the last bin - * (table.length - 1) up towards the first. Upon seeing a - * forwarding node, traversals (see class Traverser) arrange to - * move to the new table without revisiting nodes. However, to - * ensure that no intervening nodes are skipped, bin splitting can - * only begin after the associated reverse-forwarders are in - * place. - * - * The traversal scheme also applies to partial traversals of - * ranges of bins (via an alternate Traverser constructor) - * to support partitioned aggregate operations. 
Also, read-only - * operations give up if ever forwarded to a null table, which - * provides support for shutdown-style clearing, which is also not - * currently implemented. - * - * Lazy table initialization minimizes footprint until first use, - * and also avoids resizings when the first operation is from a - * putAll, constructor with map argument, or deserialization. - * These cases attempt to override the initial capacity settings, - * but harmlessly fail to take effect in cases of races. - * - * The element count is maintained using a specialization of - * LongAdder. We need to incorporate a specialization rather than - * just use a LongAdder in order to access implicit - * contention-sensing that leads to creation of multiple - * CounterCells. The counter mechanics avoid contention on - * updates but can encounter cache thrashing if read too - * frequently during concurrent access. To avoid reading so often, - * resizing under contention is attempted only upon adding to a - * bin already holding two or more nodes. Under uniform hash - * distributions, the probability of this occurring at threshold - * is around 13%, meaning that only about 1 in 8 puts check - * threshold (and after resizing, many fewer do so). - * - * TreeBins use a special form of comparison for search and - * related operations (which is the main reason we cannot use - * existing collections such as TreeMaps). TreeBins contain - * Comparable elements, but may contain others, as well as - * elements that are Comparable but not necessarily Comparable for - * the same T, so we cannot invoke compareTo among them. To handle - * this, the tree is ordered primarily by hash value, then by - * Comparable.compareTo order if applicable. On lookup at a node, - * if elements are not comparable or compare as 0 then both left - * and right children may need to be searched in the case of tied - * hash values. (This corresponds to the full list search that - * would be necessary if all elements were non-Comparable and had - * tied hashes.) On insertion, to keep a total ordering (or as - * close as is required here) across rebalancings, we compare - * classes and identityHashCodes as tie-breakers. The red-black - * balancing code is updated from pre-jdk-collections - * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) - * based in turn on Cormen, Leiserson, and Rivest "Introduction to - * Algorithms" (CLR). - * - * TreeBins also require an additional locking mechanism. While - * list traversal is always possible by readers even during - * updates, tree traversal is not, mainly because of tree-rotations - * that may change the root node and/or its linkages. TreeBins - * include a simple read-write lock mechanism parasitic on the - * main bin-synchronization strategy: Structural adjustments - * associated with an insertion or removal are already bin-locked - * (and so cannot conflict with other writers) but must wait for - * ongoing readers to finish. Since there can be only one such - * waiter, we use a simple scheme using a single "waiter" field to - * block writers. However, readers need never block. If the root - * lock is held, they proceed along the slow traversal path (via - * next-pointers) until the lock becomes available or the list is - * exhausted, whichever comes first. These cases are not fast, but - * maximize aggregate expected throughput. - * - * Maintaining API and serialization compatibility with previous - * versions of this class introduces several oddities. 
Mainly: We - * leave untouched but unused constructor arguments refering to - * concurrencyLevel. We accept a loadFactor constructor argument, - * but apply it only to initial table capacity (which is the only - * time that we can guarantee to honor it.) We also declare an - * unused "Segment" class that is instantiated in minimal form - * only when serializing. - * - * Also, solely for compatibility with previous versions of this - * class, it extends AbstractMap, even though all of its methods - * are overridden, so it is just useless baggage. - * - * This file is organized to make things a little easier to follow - * while reading than they might otherwise: First the main static - * declarations and utilities, then fields, then main public - * methods (with a few factorings of multiple public methods into - * internal ones), then sizing methods, trees, traversers, and - * bulk operations. - */ - - /* ---------------- Constants -------------- */ - - /** - * The largest possible table capacity. This value must be - * exactly 1<<30 to stay within Java array allocation and indexing - * bounds for power of two table sizes, and is further required - * because the top two bits of 32bit hash fields are used for - * control purposes. - */ - private static final int MAXIMUM_CAPACITY = 1 << 30; - - /** - * The default initial table capacity. Must be a power of 2 - * (i.e., at least 1) and at most MAXIMUM_CAPACITY. - */ - private static final int DEFAULT_CAPACITY = 16; - - /** - * The largest possible (non-power of two) array size. - * Needed by toArray and related methods. - */ - static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; - - /** - * The default concurrency level for this table. Unused but - * defined for compatibility with previous versions of this class. - */ - private static final int DEFAULT_CONCURRENCY_LEVEL = 16; - - /** - * The load factor for this table. Overrides of this value in - * constructors affect only the initial table capacity. The - * actual floating point value isn't normally used -- it is - * simpler to use expressions such as {@code n - (n >>> 2)} for - * the associated resizing threshold. - */ - private static final float LOAD_FACTOR = 0.75f; - - /** - * The bin count threshold for using a tree rather than list for a - * bin. Bins are converted to trees when adding an element to a - * bin with at least this many nodes. The value must be greater - * than 2, and should be at least 8 to mesh with assumptions in - * tree removal about conversion back to plain bins upon - * shrinkage. - */ - static final int TREEIFY_THRESHOLD = 8; - - /** - * The bin count threshold for untreeifying a (split) bin during a - * resize operation. Should be less than TREEIFY_THRESHOLD, and at - * most 6 to mesh with shrinkage detection under removal. - */ - static final int UNTREEIFY_THRESHOLD = 6; - - /** - * The smallest table capacity for which bins may be treeified. - * (Otherwise the table is resized if too many nodes in a bin.) - * The value should be at least 4 * TREEIFY_THRESHOLD to avoid - * conflicts between resizing and treeification thresholds. - */ - static final int MIN_TREEIFY_CAPACITY = 64; - - /** - * Minimum number of rebinnings per transfer step. Ranges are - * subdivided to allow multiple resizer threads. This value - * serves as a lower bound to avoid resizers encountering - * excessive memory contention. The value should be at least - * DEFAULT_CAPACITY. - */ - private static final int MIN_TRANSFER_STRIDE = 16; - - /* - * Encodings for Node hash fields. 
See above for explanation. - */ - static final int MOVED = -1; // hash for forwarding nodes - static final int TREEBIN = -2; // hash for roots of trees - static final int RESERVED = -3; // hash for transient reservations - static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash - - /** Number of CPUS, to place bounds on some sizings */ - static final int NCPU = Runtime.getRuntime().availableProcessors(); - - /** For serialization compatibility. */ - private static final ObjectStreamField[] serialPersistentFields = { - new ObjectStreamField("segments", Segment[].class), - new ObjectStreamField("segmentMask", Integer.TYPE), - new ObjectStreamField("segmentShift", Integer.TYPE) - }; - - /* ---------------- Nodes -------------- */ - - /** - * Key-value entry. This class is never exported out as a - * user-mutable Map.Entry (i.e., one supporting setValue; see - * MapEntry below), but can be used for read-only traversals used - * in bulk tasks. Subclasses of Node with a negative hash field - * are special, and contain null keys and values (but are never - * exported). Otherwise, keys and vals are never null. - */ - static class Node implements Map.Entry { - final int hash; - final K key; - volatile V val; - volatile Node next; - - Node(int hash, K key, V val, Node next) { - this.hash = hash; - this.key = key; - this.val = val; - this.next = next; - } - - @Override - public final K getKey() { return this.key; } - @Override - public final V getValue() { return this.val; } - @Override - public final int hashCode() { return this.key.hashCode() ^ this.val.hashCode(); } - @Override - public final String toString(){ return this.key + "=" + this.val; } - @Override - public final V setValue(V value) { - throw new UnsupportedOperationException(); - } - - @Override - public final boolean equals(Object o) { - Object k, v, u; Map.Entry e; - return o instanceof Map.Entry && - (k = (e = (Map.Entry)o).getKey()) != null && - (v = e.getValue()) != null && - (k == this.key || k.equals(this.key)) && - (v == (u = this.val) || v.equals(u)); - } - - /** - * Virtualized support for map.get(); overridden in subclasses. - */ - Node find(int h, Object k) { - Node e = this; - if (k != null) { - do { - K ek; - if (e.hash == h && - ((ek = e.key) == k || ek != null && k.equals(ek))) { - return e; - } - } while ((e = e.next) != null); - } - return null; - } - } - - /* ---------------- Static utilities -------------- */ - - /** - * Spreads (XORs) higher bits of hash to lower and also forces top - * bit to 0. Because the table uses power-of-two masking, sets of - * hashes that vary only in bits above the current mask will - * always collide. (Among known examples are sets of Float keys - * holding consecutive whole numbers in small tables.) So we - * apply a transform that spreads the impact of higher bits - * downward. There is a tradeoff between speed, utility, and - * quality of bit-spreading. Because many common sets of hashes - * are already reasonably distributed (so don't benefit from - * spreading), and because we use trees to handle large sets of - * collisions in bins, we just XOR some shifted bits in the - * cheapest possible way to reduce systematic lossage, as well as - * to incorporate impact of the highest bits that would otherwise - * never be used in index calculations because of table bounds. - */ - static final int spread(int h) { - return (h ^ h >>> 16) & HASH_BITS; - } - - /** - * Returns a power of two table size for the given desired capacity. 
- * See Hackers Delight, sec 3.2 - */ - private static final int tableSizeFor(int c) { - int n = c - 1; - n |= n >>> 1; - n |= n >>> 2; - n |= n >>> 4; - n |= n >>> 8; - n |= n >>> 16; - return n < 0 ? 1 : n >= MAXIMUM_CAPACITY ? MAXIMUM_CAPACITY : n + 1; - } - - /** - * Returns x's Class if it is of the form "class C implements - * Comparable", else null. - */ - static Class comparableClassFor(Object x) { - if (x instanceof Comparable) { - Class c; Type[] ts, as; Type t; ParameterizedType p; - if ((c = x.getClass()) == String.class) { - return c; - } - if ((ts = c.getGenericInterfaces()) != null) { - for (int i = 0; i < ts.length; ++i) { - if ((t = ts[i]) instanceof ParameterizedType && - (p = (ParameterizedType)t).getRawType() == - Comparable.class && - (as = p.getActualTypeArguments()) != null && - as.length == 1 && as[0] == c) { - return c; - } - } - } - } - return null; - } - - /** - * Returns k.compareTo(x) if x matches kc (k's screened comparable - * class), else 0. - */ - @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable - static int compareComparables(Class kc, Object k, Object x) { - return x == null || x.getClass() != kc ? 0 : - ((Comparable)k).compareTo(x); - } - - /* ---------------- Table element access -------------- */ - - /* - * Volatile access methods are used for table elements as well as - * elements of in-progress next table while resizing. All uses of - * the tab arguments must be null checked by callers. All callers - * also paranoically precheck that tab's length is not zero (or an - * equivalent check), thus ensuring that any index argument taking - * the form of a hash value anded with (length - 1) is a valid - * index. Note that, to be correct wrt arbitrary concurrency - * errors by users, these checks must operate on local variables, - * which accounts for some odd-looking inline assignments below. - * Note that calls to setTabAt always occur within locked regions, - * and so in principle require only release ordering, not need - * full volatile semantics, but are currently coded as volatile - * writes to be conservative. - */ - - @SuppressWarnings("unchecked") - static final Node tabAt(Node[] tab, int i) { - return (Node)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE); - } - - static final boolean casTabAt(Node[] tab, int i, - Node c, Node v) { - return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v); - } - - static final void setTabAt(Node[] tab, int i, Node v) { - U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v); - } - - /* ---------------- Fields -------------- */ - - /** - * The array of bins. Lazily initialized upon first insertion. - * Size is always a power of two. Accessed directly by iterators. - */ - transient volatile Node[] table; - - /** - * The next table to use; non-null only while resizing. - */ - private transient volatile Node[] nextTable; - - /** - * Base counter value, used mainly when there is no contention, - * but also as a fallback during table initialization - * races. Updated via CAS. - */ - private transient volatile long baseCount; - - /** - * Table initialization and resizing control. When negative, the - * table is being initialized or resized: -1 for initialization, - * else -(1 + the number of active resizing threads). Otherwise, - * when table is null, holds the initial table size to use upon - * creation, or 0 for default. After initialization, holds the - * next element count value upon which to resize the table. 
- */ - private transient volatile int sizeCtl; - - /** - * The next table index (plus one) to split while resizing. - */ - private transient volatile int transferIndex; - - /** - * The least available table index to split while resizing. - */ - private transient volatile int transferOrigin; - - /** - * Spinlock (locked via CAS) used when resizing and/or creating CounterCells. - */ - private transient volatile int cellsBusy; - - /** - * Table of counter cells. When non-null, size is a power of 2. - */ - private transient volatile CounterCell[] counterCells; - - // views - private transient KeySetView keySet; - private transient ValuesView values; - private transient EntrySetView entrySet; - - - /* ---------------- Public operations -------------- */ - - /** - * Creates a new, empty map with the default initial table size (16). - */ - public ConcurrentHashMapV8() { - } - - /** - * Creates a new, empty map with an initial table size - * accommodating the specified number of elements without the need - * to dynamically resize. - * - * @param initialCapacity The implementation performs internal - * sizing to accommodate this many elements. - * @throws IllegalArgumentException if the initial capacity of - * elements is negative - */ - public ConcurrentHashMapV8(int initialCapacity) { - if (initialCapacity < 0) { - throw new IllegalArgumentException(); - } - int cap = initialCapacity >= MAXIMUM_CAPACITY >>> 1 ? - MAXIMUM_CAPACITY : - tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1); - this.sizeCtl = cap; - } - - /** - * Creates a new map with the same mappings as the given map. - * - * @param m the map - */ - public ConcurrentHashMapV8(Map m) { - this.sizeCtl = DEFAULT_CAPACITY; - putAll(m); - } - - /** - * Creates a new, empty map with an initial table size based on - * the given number of elements ({@code initialCapacity}) and - * initial table density ({@code loadFactor}). - * - * @param initialCapacity the initial capacity. The implementation - * performs internal sizing to accommodate this many elements, - * given the specified load factor. - * @param loadFactor the load factor (table density) for - * establishing the initial table size - * @throws IllegalArgumentException if the initial capacity of - * elements is negative or the load factor is nonpositive - * - * @since 1.6 - */ - public ConcurrentHashMapV8(int initialCapacity, float loadFactor) { - this(initialCapacity, loadFactor, 1); - } - - /** - * Creates a new, empty map with an initial table size based on - * the given number of elements ({@code initialCapacity}), table - * density ({@code loadFactor}), and number of concurrently - * updating threads ({@code concurrencyLevel}). - * - * @param initialCapacity the initial capacity. The implementation - * performs internal sizing to accommodate this many elements, - * given the specified load factor. - * @param loadFactor the load factor (table density) for - * establishing the initial table size - * @param concurrencyLevel the estimated number of concurrently - * updating threads. The implementation may use this value as - * a sizing hint. 
- * @throws IllegalArgumentException if the initial capacity is - * negative or the load factor or concurrencyLevel are - * nonpositive - */ - public ConcurrentHashMapV8(int initialCapacity, - float loadFactor, int concurrencyLevel) { - if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) { - throw new IllegalArgumentException(); - } - if (initialCapacity < concurrencyLevel) - { - initialCapacity = concurrencyLevel; // as estimated threads - } - long size = (long)(1.0 + initialCapacity / loadFactor); - int cap = size >= MAXIMUM_CAPACITY ? - MAXIMUM_CAPACITY : tableSizeFor((int)size); - this.sizeCtl = cap; - } - - // Original (since JDK1.2) Map methods - - /** - * {@inheritDoc} - */ - @Override - public int size() { - long n = sumCount(); - return n < 0L ? 0 : - n > Integer.MAX_VALUE ? Integer.MAX_VALUE : - (int)n; - } - - /** - * {@inheritDoc} - */ - @Override - public boolean isEmpty() { - return sumCount() <= 0L; // ignore transient negative values - } - - /** - * Returns the value to which the specified key is mapped, - * or {@code null} if this map contains no mapping for the key. - * - *

More formally, if this map contains a mapping from a key - * {@code k} to a value {@code v} such that {@code key.equals(k)}, - * then this method returns {@code v}; otherwise it returns - * {@code null}. (There can be at most one such mapping.) - * - * @throws NullPointerException if the specified key is null - */ - @Override - public V get(Object key) { - Node[] tab; Node e, p; int n, eh; K ek; - int h = spread(key.hashCode()); - if ((tab = this.table) != null && (n = tab.length) > 0 && - (e = tabAt(tab, n - 1 & h)) != null) { - if ((eh = e.hash) == h) { - if ((ek = e.key) == key || ek != null && key.equals(ek)) { - return e.val; - } - } - else if (eh < 0) { - return (p = e.find(h, key)) != null ? p.val : null; - } - while ((e = e.next) != null) { - if (e.hash == h && - ((ek = e.key) == key || ek != null && key.equals(ek))) { - return e.val; - } - } - } - return null; - } - - /** - * Tests if the specified object is a key in this table. - * - * @param key possible key - * @return {@code true} if and only if the specified object - * is a key in this table, as determined by the - * {@code equals} method; {@code false} otherwise - * @throws NullPointerException if the specified key is null - */ - @Override - public boolean containsKey(Object key) { - return get(key) != null; - } - - /** - * Returns {@code true} if this map maps one or more keys to the - * specified value. Note: This method may require a full traversal - * of the map, and is much slower than method {@code containsKey}. - * - * @param value value whose presence in this map is to be tested - * @return {@code true} if this map maps one or more keys to the - * specified value - * @throws NullPointerException if the specified value is null - */ - @Override - public boolean containsValue(Object value) { - if (value == null) { - throw new NullPointerException(); - } - Node[] t; - if ((t = this.table) != null) { - Traverser it = new Traverser(t, t.length, 0, t.length); - for (Node p; (p = it.advance()) != null; ) { - V v; - if ((v = p.val) == value || v != null && value.equals(v)) { - return true; - } - } - } - return false; - } - - /** - * Maps the specified key to the specified value in this table. - * Neither the key nor the value can be null. - * - *

The value can be retrieved by calling the {@code get} method - * with a key that is equal to the original key. - * - * @param key key with which the specified value is to be associated - * @param value value to be associated with the specified key - * @return the previous value associated with {@code key}, or - * {@code null} if there was no mapping for {@code key} - * @throws NullPointerException if the specified key or value is null - */ - @Override - public V put(K key, V value) { - return putVal(key, value, false); - } - - /** Implementation for put and putIfAbsent */ - final V putVal(K key, V value, boolean onlyIfAbsent) { - if (key == null || value == null) { - throw new NullPointerException(); - } - int hash = spread(key.hashCode()); - int binCount = 0; - for (Node[] tab = this.table;;) { - Node f; int n, i, fh; - if (tab == null || (n = tab.length) == 0) { - tab = initTable(); - } else if ((f = tabAt(tab, i = n - 1 & hash)) == null) { - if (casTabAt(tab, i, null, - new Node(hash, key, value, null))) - { - break; // no lock when adding to empty bin - } - } - else if ((fh = f.hash) == MOVED) { - tab = helpTransfer(tab, f); - } else { - V oldVal = null; - synchronized (f) { - if (tabAt(tab, i) == f) { - if (fh >= 0) { - binCount = 1; - for (Node e = f;; ++binCount) { - K ek; - if (e.hash == hash && - ((ek = e.key) == key || - ek != null && key.equals(ek))) { - oldVal = e.val; - if (!onlyIfAbsent) { - e.val = value; - } - break; - } - Node pred = e; - if ((e = e.next) == null) { - pred.next = new Node(hash, key, - value, null); - break; - } - } - } - else if (f instanceof TreeBin) { - Node p; - binCount = 2; - if ((p = ((TreeBin)f).putTreeVal(hash, key, - value)) != null) { - oldVal = p.val; - if (!onlyIfAbsent) { - p.val = value; - } - } - } - } - } - if (binCount != 0) { - if (binCount >= TREEIFY_THRESHOLD) { - treeifyBin(tab, i); - } - if (oldVal != null) { - return oldVal; - } - break; - } - } - } - addCount(1L, binCount); - return null; - } - - /** - * Copies all of the mappings from the specified map to this one. - * These mappings replace any mappings that this map had for any of the - * keys currently in the specified map. - * - * @param m mappings to be stored in this map - */ - @Override - public void putAll(Map m) { - tryPresize(m.size()); - for (Map.Entry e : m.entrySet()) { - putVal(e.getKey(), e.getValue(), false); - } - } - - /** - * Removes the key (and its corresponding value) from this map. - * This method does nothing if the key is not in the map. - * - * @param key the key that needs to be removed - * @return the previous value associated with {@code key}, or - * {@code null} if there was no mapping for {@code key} - * @throws NullPointerException if the specified key is null - */ - @Override - public V remove(Object key) { - return replaceNode(key, null, null); - } - - /** - * Implementation for the four public remove/replace methods: - * Replaces node value with v, conditional upon match of cv if - * non-null. If resulting value is null, delete. 
- */ - final V replaceNode(Object key, V value, Object cv) { - int hash = spread(key.hashCode()); - for (Node[] tab = this.table;;) { - Node f; int n, i, fh; - if (tab == null || (n = tab.length) == 0 || - (f = tabAt(tab, i = n - 1 & hash)) == null) { - break; - } else if ((fh = f.hash) == MOVED) { - tab = helpTransfer(tab, f); - } else { - V oldVal = null; - boolean validated = false; - synchronized (f) { - if (tabAt(tab, i) == f) { - if (fh >= 0) { - validated = true; - for (Node e = f, pred = null;;) { - K ek; - if (e.hash == hash && - ((ek = e.key) == key || - ek != null && key.equals(ek))) { - V ev = e.val; - if (cv == null || cv == ev || - ev != null && cv.equals(ev)) { - oldVal = ev; - if (value != null) { - e.val = value; - } else if (pred != null) { - pred.next = e.next; - } else { - setTabAt(tab, i, e.next); - } - } - break; - } - pred = e; - if ((e = e.next) == null) { - break; - } - } - } - else if (f instanceof TreeBin) { - validated = true; - TreeBin t = (TreeBin)f; - TreeNode r, p; - if ((r = t.root) != null && - (p = r.findTreeNode(hash, key, null)) != null) { - V pv = p.val; - if (cv == null || cv == pv || - pv != null && cv.equals(pv)) { - oldVal = pv; - if (value != null) { - p.val = value; - } else if (t.removeTreeNode(p)) { - setTabAt(tab, i, untreeify(t.first)); - } - } - } - } - } - } - if (validated) { - if (oldVal != null) { - if (value == null) { - addCount(-1L, -1); - } - return oldVal; - } - break; - } - } - } - return null; - } - - /** - * Removes all of the mappings from this map. - */ - @Override - public void clear() { - long delta = 0L; // negative number of deletions - int i = 0; - Node[] tab = this.table; - while (tab != null && i < tab.length) { - int fh; - Node f = tabAt(tab, i); - if (f == null) { - ++i; - } else if ((fh = f.hash) == MOVED) { - tab = helpTransfer(tab, f); - i = 0; // restart - } - else { - synchronized (f) { - if (tabAt(tab, i) == f) { - Node p = fh >= 0 ? f : - f instanceof TreeBin ? - ((TreeBin)f).first : null; - while (p != null) { - --delta; - p = p.next; - } - setTabAt(tab, i++, null); - } - } - } - } - if (delta != 0L) { - addCount(delta, -1); - } - } - - /** - * Returns a {@link Set} view of the keys contained in this map. - * The set is backed by the map, so changes to the map are - * reflected in the set, and vice-versa. The set supports element - * removal, which removes the corresponding mapping from this map, - * via the {@code Iterator.remove}, {@code Set.remove}, - * {@code removeAll}, {@code retainAll}, and {@code clear} - * operations. It does not support the {@code add} or - * {@code addAll} operations. - * - *

The view's {@code iterator} is a "weakly consistent" iterator - * that will never throw {@link ConcurrentModificationException}, - * and guarantees to traverse elements as they existed upon - * construction of the iterator, and may (but is not guaranteed to) - * reflect any modifications subsequent to construction. - * - * @return the set view - */ - @Override - public KeySetView keySet() { - KeySetView ks; - return (ks = this.keySet) != null ? ks : (this.keySet = new KeySetView(this, null)); - } - - /** - * Returns a {@link Collection} view of the values contained in this map. - * The collection is backed by the map, so changes to the map are - * reflected in the collection, and vice-versa. The collection - * supports element removal, which removes the corresponding - * mapping from this map, via the {@code Iterator.remove}, - * {@code Collection.remove}, {@code removeAll}, - * {@code retainAll}, and {@code clear} operations. It does not - * support the {@code add} or {@code addAll} operations. - * - *

[... deleted body of ConcurrentHashMapV8.java continues: the weakly-consistent values()/entrySet() views, hashCode/toString/equals, legacy Segment serialization compatibility (writeObject/readObject), the ConcurrentMap methods (putIfAbsent, remove, replace), the JDK-8-style operations (getOrDefault, forEach, replaceAll, computeIfAbsent, computeIfPresent, compute, merge), the Hashtable legacy methods (contains, keys, elements), mappingCount/newKeySet/keySet(V), the ForwardingNode and ReservationNode helpers, table initialization and resizing (initTable, addCount, helpTransfer, tryPresize, transfer), bin treeification (treeifyBin, untreeify), the TreeNode/TreeBin red-black tree implementation, the Traverser with its key/value/entry iterators and spliterators, and the CollectionView/KeySetView/ValuesView classes ...]
0L : n); - } - - public void forEach(Action action) { - if (action == null) { - throw new NullPointerException(); - } - Node[] t; - if ((t = this.map.table) != null) { - Traverser it = new Traverser(t, t.length, 0, t.length); - for (Node p; (p = it.advance()) != null; ) { - action.apply(p.val); - } - } - } - } - - /** - * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value) - * entries. This class cannot be directly instantiated. See - * {@link #entrySet()}. - */ - static final class EntrySetView extends CollectionView> - implements Set>, java.io.Serializable { - private static final long serialVersionUID = 2249069246763182397L; - EntrySetView(ConcurrentHashMapV8 map) { super(map); } - - @Override - public boolean contains(Object o) { - Object k, v, r; Map.Entry e; - return o instanceof Map.Entry && - (k = (e = (Map.Entry)o).getKey()) != null && - (r = this.map.get(k)) != null && - (v = e.getValue()) != null && - (v == r || v.equals(r)); - } - - @Override - public boolean remove(Object o) { - Object k, v; Map.Entry e; - return o instanceof Map.Entry && - (k = (e = (Map.Entry)o).getKey()) != null && - (v = e.getValue()) != null && - this.map.remove(k, v); - } - - /** - * @return an iterator over the entries of the backing map - */ - @Override - public Iterator> iterator() { - ConcurrentHashMapV8 m = this.map; - Node[] t; - int f = (t = m.table) == null ? 0 : t.length; - return new EntryIterator(t, f, 0, f, m); - } - - @Override - public boolean add(Entry e) { - return this.map.putVal(e.getKey(), e.getValue(), false) == null; - } - - @Override - public boolean addAll(Collection> c) { - boolean added = false; - for (Entry e : c) { - if (add(e)) { - added = true; - } - } - return added; - } - - @Override - public final int hashCode() { - int h = 0; - Node[] t; - if ((t = this.map.table) != null) { - Traverser it = new Traverser(t, t.length, 0, t.length); - for (Node p; (p = it.advance()) != null; ) { - h += p.hashCode(); - } - } - return h; - } - - @Override - public final boolean equals(Object o) { - Set c; - return o instanceof Set && - ((c = (Set)o) == this || - containsAll(c) && c.containsAll(this)); - } - - public ConcurrentHashMapSpliterator> spliterator() { - Node[] t; - ConcurrentHashMapV8 m = this.map; - long n = m.sumCount(); - int f = (t = m.table) == null ? 0 : t.length; - return new EntrySpliterator(t, f, 0, f, n < 0L ? 0L : n, m); - } - - public void forEach(Action> action) { - if (action == null) { - throw new NullPointerException(); - } - Node[] t; - if ((t = this.map.table) != null) { - Traverser it = new Traverser(t, t.length, 0, t.length); - for (Node p; (p = it.advance()) != null; ) { - action.apply(new MapEntry(p.key, p.val, this.map)); - } - } - } - - } - - /* ---------------- Counters -------------- */ - - // Adapted from LongAdder and Striped64. - // See their internal docs for explanation. - - // A padded cell for distributing counts - static final class CounterCell { - volatile long p0, p1, p2, p3, p4, p5, p6; - volatile long value; - volatile long q0, q1, q2, q3, q4, q5, q6; - CounterCell(long x) { this.value = x; } - } - - /** - * Holder for the thread-local hash code determining which - * CounterCell to use. The code is initialized via the - * counterHashCodeGenerator, but may be moved upon collisions. - */ - static final class CounterHashCode { - int code; - } - - /** - * Generates initial value for per-thread CounterHashCodes. 
- */ - static final AtomicInteger counterHashCodeGenerator = new AtomicInteger(); - - /** - * Increment for counterHashCodeGenerator. See class ThreadLocal - * for explanation. - */ - static final int SEED_INCREMENT = 0x61c88647; - - /** - * Per-thread counter hash codes. Shared across all instances. - */ - static final ThreadLocal threadCounterHashCode = - new ThreadLocal(); - - - final long sumCount() { - CounterCell[] as = this.counterCells; CounterCell a; - long sum = this.baseCount; - if (as != null) { - for (int i = 0; i < as.length; ++i) { - if ((a = as[i]) != null) { - sum += a.value; - } - } - } - return sum; - } - - // See LongAdder version for explanation - private final void fullAddCount(long x, CounterHashCode hc, - boolean wasUncontended) { - int h; - if (hc == null) { - hc = new CounterHashCode(); - int s = counterHashCodeGenerator.addAndGet(SEED_INCREMENT); - h = hc.code = s == 0 ? 1 : s; // Avoid zero - threadCounterHashCode.set(hc); - } else { - h = hc.code; - } - boolean collide = false; // True if last slot nonempty - for (;;) { - CounterCell[] as; CounterCell a; int n; long v; - if ((as = this.counterCells) != null && (n = as.length) > 0) { - if ((a = as[n - 1 & h]) == null) { - if (this.cellsBusy == 0) { // Try to attach new Cell - CounterCell r = new CounterCell(x); // Optimistic create - if (this.cellsBusy == 0 && - U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { - boolean created = false; - try { // Recheck under lock - CounterCell[] rs; int m, j; - if ((rs = this.counterCells) != null && - (m = rs.length) > 0 && - rs[j = m - 1 & h] == null) { - rs[j] = r; - created = true; - } - } finally { - this.cellsBusy = 0; - } - if (created) { - break; - } - continue; // Slot is now non-empty - } - } - collide = false; - } - else if (!wasUncontended) { - wasUncontended = true; // Continue after rehash - } else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x)) { - break; - } else if (this.counterCells != as || n >= NCPU) { - collide = false; // At max size or stale - } else if (!collide) { - collide = true; - } else if (this.cellsBusy == 0 && - U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { - try { - if (this.counterCells == as) {// Expand table unless stale - CounterCell[] rs = new CounterCell[n << 1]; - for (int i = 0; i < n; ++i) { - rs[i] = as[i]; - } - this.counterCells = rs; - } - } finally { - this.cellsBusy = 0; - } - collide = false; - continue; // Retry with expanded table - } - h ^= h << 13; // Rehash - h ^= h >>> 17; - h ^= h << 5; - } - else if (this.cellsBusy == 0 && this.counterCells == as && - U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { - boolean init = false; - try { // Initialize table - if (this.counterCells == as) { - CounterCell[] rs = new CounterCell[2]; - rs[h & 1] = new CounterCell(x); - this.counterCells = rs; - init = true; - } - } finally { - this.cellsBusy = 0; - } - if (init) { - break; - } - } - else if (U.compareAndSwapLong(this, BASECOUNT, v = this.baseCount, v + x)) - { - break; // Fall back on using base - } - } - hc.code = h; // Record index for next time - } - - // Unsafe mechanics - private static final sun.misc.Unsafe U; - private static final long SIZECTL; - private static final long TRANSFERINDEX; - private static final long TRANSFERORIGIN; - private static final long BASECOUNT; - private static final long CELLSBUSY; - private static final long CELLVALUE; - private static final long ABASE; - private static final int ASHIFT; - - static { - try { - U = getUnsafe(); - Class k = ConcurrentHashMapV8.class; - SIZECTL = 
U.objectFieldOffset - (k.getDeclaredField("sizeCtl")); - TRANSFERINDEX = U.objectFieldOffset - (k.getDeclaredField("transferIndex")); - TRANSFERORIGIN = U.objectFieldOffset - (k.getDeclaredField("transferOrigin")); - BASECOUNT = U.objectFieldOffset - (k.getDeclaredField("baseCount")); - CELLSBUSY = U.objectFieldOffset - (k.getDeclaredField("cellsBusy")); - Class ck = CounterCell.class; - CELLVALUE = U.objectFieldOffset - (ck.getDeclaredField("value")); - Class ak = Node[].class; - ABASE = U.arrayBaseOffset(ak); - int scale = U.arrayIndexScale(ak); - if ((scale & scale - 1) != 0) { - throw new Error("data type scale not a power of two"); - } - ASHIFT = 31 - Integer.numberOfLeadingZeros(scale); - } catch (Exception e) { - throw new Error(e); - } - } - - /** - * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. - * Replace with a simple call to Unsafe.getUnsafe when integrating - * into a jdk. - * - * @return a sun.misc.Unsafe - */ - private static sun.misc.Unsafe getUnsafe() { - try { - return sun.misc.Unsafe.getUnsafe(); - } catch (SecurityException tryReflectionInstead) {} - try { - return java.security.AccessController.doPrivileged - (new java.security.PrivilegedExceptionAction() { - @Override - public sun.misc.Unsafe run() throws Exception { - Class k = sun.misc.Unsafe.class; - for (java.lang.reflect.Field f : k.getDeclaredFields()) { - f.setAccessible(true); - Object x = f.get(null); - if (k.isInstance(x)) { - return k.cast(x); - } - } - throw new NoSuchFieldError("the Unsafe"); - }}); - } catch (java.security.PrivilegedActionException e) { - throw new RuntimeException("Could not initialize intrinsics", - e.getCause()); - } - } -} diff --git a/src/dorkbox/util/messagebus/common/adapter/Java6Adapter.java b/src/dorkbox/util/messagebus/common/adapter/Java6Adapter.java index 75c4118..46f28da 100644 --- a/src/dorkbox/util/messagebus/common/adapter/Java6Adapter.java +++ b/src/dorkbox/util/messagebus/common/adapter/Java6Adapter.java @@ -15,6 +15,7 @@ */ package dorkbox.util.messagebus.common.adapter; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; public @@ -23,6 +24,6 @@ class Java6Adapter implements MapAdapter { @Override public final ConcurrentMap concurrentMap(final int size, final float loadFactor, final int stripeSize) { - return new ConcurrentHashMapV8(size, loadFactor, stripeSize); + return new ConcurrentHashMap(size, loadFactor, stripeSize); } } diff --git a/src/dorkbox/util/messagebus/common/adapter/StampedLock.java b/src/dorkbox/util/messagebus/common/adapter/StampedLock.java deleted file mode 100644 index 3df59d6..0000000 --- a/src/dorkbox/util/messagebus/common/adapter/StampedLock.java +++ /dev/null @@ -1,1549 +0,0 @@ -/* - * Written by Doug Lea with assistance from members of JCP JSR-166 - * Expert Group and released to the public domain, as explained at - * http://creativecommons.org/publicdomain/zero/1.0/ - * - * Copyright 2015 dorkbox, llc - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package dorkbox.util.messagebus.common.adapter; - -import java.util.Random; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.Condition; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReadWriteLock; - -/** - * A capability-based lock with three modes for controlling read/write - * access. The state of a StampedLock consists of a version and mode. - * Lock acquisition methods return a stamp that represents and - * controls access with respect to a lock state; "try" versions of - * these methods may instead return the special value zero to - * represent failure to acquire access. Lock release and conversion - * methods require stamps as arguments, and fail if they do not match - * the state of the lock. The three modes are: - * - *

[Javadoc list of the three modes -- Writing (writeLock), Reading (readLock), and Optimistic Reading (tryOptimisticRead) -- elided here.]
This class also supports methods that conditionally provide - * conversions across the three modes. For example, method {@link - * #tryConvertToWriteLock} attempts to "upgrade" a mode, returning - * a valid write stamp if (1) already in writing mode (2) in reading - * mode and there are no other readers or (3) in optimistic mode and - * the lock is available. The forms of these methods are designed to - * help reduce some of the code bloat that otherwise occurs in - * retry-based designs. - * - *

StampedLocks are designed for use as internal utilities in the - * development of thread-safe components. Their use relies on - * knowledge of the internal properties of the data, objects, and - * methods they are protecting. They are not reentrant, so locked - * bodies should not call other unknown methods that may try to - * re-acquire locks (although you may pass a stamp to other methods - * that can use or convert it). The use of read lock modes relies on - * the associated code sections being side-effect-free. Unvalidated - * optimistic read sections cannot call methods that are not known to - * tolerate potential inconsistencies. Stamps use finite - * representations, and are not cryptographically secure (i.e., a - * valid stamp may be guessable). Stamp values may recycle after (no - * sooner than) one year of continuous operation. A stamp held without - * use or validation for longer than this period may fail to validate - * correctly. StampedLocks are serializable, but always deserialize - * into initial unlocked state, so they are not useful for remote - * locking. - * - *

The scheduling policy of StampedLock does not consistently - * prefer readers over writers or vice versa. All "try" methods are - * best-effort and do not necessarily conform to any scheduling or - * fairness policy. A zero return from any "try" method for acquiring - * or converting locks does not carry any information about the state - * of the lock; a subsequent invocation may succeed. - * - *

Because it supports coordinated usage across multiple lock - * modes, this class does not directly implement the {@link Lock} or - * {@link ReadWriteLock} interfaces. However, a StampedLock may be - * viewed {@link #asReadLock()}, {@link #asWriteLock()}, or {@link - * #asReadWriteLock()} in applications requiring only the associated - * set of functionality. - * - *
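The Lock/ReadWriteLock views described in the paragraph above are not exercised by the Point sample that follows, so here is a minimal sketch of that view API. It is written against java.util.concurrent.locks.StampedLock, whose view methods this deleted backport mirrors (asReadLock, asWriteLock, asReadWriteLock appear later in this file); the Counter class, its value field, and the increment/get methods are illustrative only and are not part of this patch.

    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.StampedLock;

    // Illustrative sketch: drive a StampedLock through its plain ReadWriteLock view,
    // so callers never have to manage stamps themselves.
    class Counter {
        private final StampedLock sl = new StampedLock();
        private final ReadWriteLock rw = sl.asReadWriteLock();  // stamp-free view
        private long value;

        void increment() {
            final Lock w = rw.writeLock();   // Lock#lock() maps to sl.writeLock()
            w.lock();
            try {
                value++;
            } finally {
                w.unlock();
            }
        }

        long get() {
            final Lock r = rw.readLock();    // Lock#lock() maps to sl.readLock()
            r.lock();
            try {
                return value;
            } finally {
                r.unlock();
            }
        }
    }

The view returned by asReadWriteLock() does not support newCondition(); as the view classes later in this file show, it simply forwards to readLock()/writeLock() and the unstamped unlock paths.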

Sample Usage. The following illustrates some usage idioms - * in a class that maintains simple two-dimensional points. The sample - * code illustrates some try/catch conventions even though they are - * not strictly needed here because no exceptions can occur in their - * bodies.
- *
- * <pre>{@code
- * class Point {
- *   private double x, y;
- *   private final StampedLock sl = new StampedLock();
- *
- *   void move(double deltaX, double deltaY) { // an exclusively locked method
- *     long stamp = sl.writeLock();
- *     try {
- *       x += deltaX;
- *       y += deltaY;
- *     } finally {
- *       sl.unlockWrite(stamp);
- *     }
- *   }
- *
- *   double distanceFromOrigin() { // A read-only method
- *     long stamp = sl.tryOptimisticRead();
- *     double currentX = x, currentY = y;
- *     if (!sl.validate(stamp)) {
- *        stamp = sl.readLock();
- *        try {
- *          currentX = x;
- *          currentY = y;
- *        } finally {
- *           sl.unlockRead(stamp);
- *        }
- *     }
- *     return Math.sqrt(currentX * currentX + currentY * currentY);
- *   }
- *
- *   void moveIfAtOrigin(double newX, double newY) { // upgrade
- *     // Could instead start with optimistic, not read mode
- *     long stamp = sl.readLock();
- *     try {
- *       while (x == 0.0 && y == 0.0) {
- *         long ws = sl.tryConvertToWriteLock(stamp);
- *         if (ws != 0L) {
- *           stamp = ws;
- *           x = newX;
- *           y = newY;
- *           break;
- *         }
- *         else {
- *           sl.unlockRead(stamp);
- *           stamp = sl.writeLock();
- *         }
- *       }
- *     } finally {
- *       sl.unlock(stamp);
- *     }
- *   }
- * }}
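The comment in moveIfAtOrigin above notes that the method could start in optimistic rather than read mode. The following sketch shows that variant as an extra method for the Point class in the sample; the name moveIfAtOriginOptimistic and its single-attempt structure are illustrative only (not part of the original file), it reuses the sl, x, and y fields from the sample, and it exercises case (3) of the tryConvertToWriteLock rules described earlier (optimistic mode with the lock available).

    // Illustrative addition to the Point sample above: start from an optimistic
    // stamp and upgrade directly to a write stamp when possible.
    void moveIfAtOriginOptimistic(double newX, double newY) {
        long stamp = sl.tryOptimisticRead();            // no lock held yet
        double curX = x, curY = y;                      // possibly racy reads
        if (sl.validate(stamp) && (curX != 0.0 || curY != 0.0)) {
            return;                                     // consistent snapshot, not at origin
        }
        // Either the reads raced with a writer or the point looked like the origin:
        // try to convert the optimistic stamp to a write stamp, else block for the lock.
        long ws = sl.tryConvertToWriteLock(stamp);
        if (ws == 0L) {
            ws = sl.writeLock();
        }
        try {
            if (x == 0.0 && y == 0.0) {                 // re-check under exclusive access
                x = newX;
                y = newY;
            }
        } finally {
            sl.unlockWrite(ws);
        }
    }

Note the re-check under the write lock: the optimistic reads are only hints, and the move is decided solely while exclusive access is held, which keeps the sketch correct whether the conversion succeeded or the fallback writeLock() path was taken.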
- * - * @since 1.8 - * @author Doug Lea - */ -public class StampedLock implements java.io.Serializable { - /* - * Algorithmic notes: - * - * The design employs elements of Sequence locks - * (as used in linux kernels; see Lameter's - * http://www.lameter.com/gelato2005.pdf - * and elsewhere; see - * Boehm's http://www.hpl.hp.com/techreports/2012/HPL-2012-68.html) - * and Ordered RW locks (see Shirako et al - * http://dl.acm.org/citation.cfm?id=2312015) - * - * Conceptually, the primary state of the lock includes a sequence - * number that is odd when write-locked and even otherwise. - * However, this is offset by a reader count that is non-zero when - * read-locked. The read count is ignored when validating - * "optimistic" seqlock-reader-style stamps. Because we must use - * a small finite number of bits (currently 7) for readers, a - * supplementary reader overflow word is used when the number of - * readers exceeds the count field. We do this by treating the max - * reader count value (RBITS) as a spinlock protecting overflow - * updates. - * - * Waiters use a modified form of CLH lock used in - * AbstractQueuedSynchronizer (see its internal documentation for - * a fuller account), where each node is tagged (field mode) as - * either a reader or writer. Sets of waiting readers are grouped - * (linked) under a common node (field cowait) so act as a single - * node with respect to most CLH mechanics. By virtue of the - * queue structure, wait nodes need not actually carry sequence - * numbers; we know each is greater than its predecessor. This - * simplifies the scheduling policy to a mainly-FIFO scheme that - * incorporates elements of Phase-Fair locks (see Brandenburg & - * Anderson, especially http://www.cs.unc.edu/~bbb/diss/). In - * particular, we use the phase-fair anti-barging rule: If an - * incoming reader arrives while read lock is held but there is a - * queued writer, this incoming reader is queued. (This rule is - * responsible for some of the complexity of method acquireRead, - * but without it, the lock becomes highly unfair.) Method release - * does not (and sometimes cannot) itself wake up cowaiters. This - * is done by the primary thread, but helped by any other threads - * with nothing better to do in methods acquireRead and - * acquireWrite. - * - * These rules apply to threads actually queued. All tryLock forms - * opportunistically try to acquire locks regardless of preference - * rules, and so may "barge" their way in. Randomized spinning is - * used in the acquire methods to reduce (increasingly expensive) - * context switching while also avoiding sustained memory - * thrashing among many threads. We limit spins to the head of - * queue. A thread spin-waits up to SPINS times (where each - * iteration decreases spin count with 50% probability) before - * blocking. If, upon wakening it fails to obtain lock, and is - * still (or becomes) the first waiting thread (which indicates - * that some other thread barged and obtained lock), it escalates - * spins (up to MAX_HEAD_SPINS) to reduce the likelihood of - * continually losing to barging threads. - * - * Nearly all of these mechanics are carried out in methods - * acquireWrite and acquireRead, that, as typical of such code, - * sprawl out because actions and retries rely on consistent sets - * of locally cached reads. - * - * As noted in Boehm's paper (above), sequence validation (mainly - * method validate()) requires stricter ordering rules than apply - * to normal volatile reads (of "state"). 
In the absence of (but - * continual hope for) explicit JVM support of intrinsics with - * double-sided reordering prohibition, or corresponding fence - * intrinsics, we for now uncomfortably rely on the fact that the - * Unsafe.getXVolatile intrinsic must have this property - * (syntactic volatile reads do not) for internal purposes anyway, - * even though it is not documented. - * - * The memory layout keeps lock state and queue pointers together - * (normally on the same cache line). This usually works well for - * read-mostly loads. In most other cases, the natural tendency of - * adaptive-spin CLH locks to reduce memory contention lessens - * motivation to further spread out contended locations, but might - * be subject to future improvements. - */ - - private static final long serialVersionUID = -6001602636862214147L; - - /** Number of processors, for spin control */ - private static final int NCPU = Runtime.getRuntime().availableProcessors(); - - /** Maximum number of retries before enqueuing on acquisition */ - private static final int SPINS = NCPU > 1 ? 1 << 6 : 0; - - /** Maximum number of retries before blocking at head on acquisition */ - private static final int HEAD_SPINS = NCPU > 1 ? 1 << 10 : 0; - - /** Maximum number of retries before re-blocking */ - private static final int MAX_HEAD_SPINS = NCPU > 1 ? 1 << 16 : 0; - - /** The period for yielding when waiting for overflow spinlock */ - private static final int OVERFLOW_YIELD_RATE = 7; // must be power 2 - 1 - - /** The number of bits to use for reader count before overflowing */ - private static final int LG_READERS = 7; - - // Values for lock state and stamp operations - private static final long RUNIT = 1L; - private static final long WBIT = 1L << LG_READERS; - private static final long RBITS = WBIT - 1L; - private static final long RFULL = RBITS - 1L; - private static final long ABITS = RBITS | WBIT; - private static final long SBITS = ~RBITS; // note overlap with ABITS - - // Initial value for lock state; avoid failure value zero - private static final long ORIGIN = WBIT << 1; - - // Special value from cancelled acquire methods so caller can throw IE - private static final long INTERRUPTED = 1L; - - // Values for node status; order matters - private static final int WAITING = -1; - private static final int CANCELLED = 1; - - // Modes for nodes (int not boolean to allow arithmetic) - private static final int RMODE = 0; - private static final int WMODE = 1; - - private static final ThreadLocal randomThreadLocal = new ThreadLocal() { - @Override - protected - Random initialValue() { - return new Random(); - } - }; - - /** Wait nodes */ - static final class WNode { - volatile WNode prev; - volatile WNode next; - volatile WNode cowait; // list of linked readers - volatile Thread thread; // non-null while possibly parked - volatile int status; // 0, WAITING, or CANCELLED - final int mode; // RMODE or WMODE - WNode(int m, WNode p) { this.mode = m; this.prev = p; } - } - - /** Head of CLH queue */ - private transient volatile WNode whead; - /** Tail (last) of CLH queue */ - private transient volatile WNode wtail; - - // views - transient ReadLockView readLockView; - transient WriteLockView writeLockView; - transient ReadWriteLockView readWriteLockView; - - /** Lock sequence/state */ - private transient volatile long state; - /** extra reader count when state read count saturated */ - private transient int readerOverflow; - - - /** - * Creates a new lock, initially in unlocked state. 
- */ - public StampedLock() { - this.state = ORIGIN; - } - - /** - * Exclusively acquires the lock, blocking if necessary - * until available. - * - * @return a stamp that can be used to unlock or convert mode - */ - public long writeLock() { - long s, next; // bypass acquireWrite in fully unlocked case only - return ((s = this.state) & ABITS) == 0L && - U.compareAndSwapLong(this, STATE, s, next = s + WBIT) ? - next : acquireWrite(false, 0L); - } - - /** - * Exclusively acquires the lock if it is immediately available. - * - * @return a stamp that can be used to unlock or convert mode, - * or zero if the lock is not available - */ - public long tryWriteLock() { - long s, next; - return ((s = this.state) & ABITS) == 0L && - U.compareAndSwapLong(this, STATE, s, next = s + WBIT) ? - next : 0L; - } - - /** - * Exclusively acquires the lock if it is available within the - * given time and the current thread has not been interrupted. - * Behavior under timeout and interruption matches that specified - * for method {@link Lock#tryLock(long,TimeUnit)}. - * - * @param time the maximum time to wait for the lock - * @param unit the time unit of the {@code time} argument - * @return a stamp that can be used to unlock or convert mode, - * or zero if the lock is not available - * @throws InterruptedException if the current thread is interrupted - * before acquiring the lock - */ - public long tryWriteLock(long time, TimeUnit unit) - throws InterruptedException { - long nanos = unit.toNanos(time); - if (!Thread.interrupted()) { - long next, deadline; - if ((next = tryWriteLock()) != 0L) { - return next; - } - if (nanos <= 0L) { - return 0L; - } - if ((deadline = System.nanoTime() + nanos) == 0L) { - deadline = 1L; - } - if ((next = acquireWrite(true, deadline)) != INTERRUPTED) { - return next; - } - } - throw new InterruptedException(); - } - - /** - * Exclusively acquires the lock, blocking if necessary - * until available or the current thread is interrupted. - * Behavior under interruption matches that specified - * for method {@link Lock#lockInterruptibly()}. - * - * @return a stamp that can be used to unlock or convert mode - * @throws InterruptedException if the current thread is interrupted - * before acquiring the lock - */ - public long writeLockInterruptibly() throws InterruptedException { - long next; - if (!Thread.interrupted() && - (next = acquireWrite(true, 0L)) != INTERRUPTED) { - return next; - } - throw new InterruptedException(); - } - - /** - * Non-exclusively acquires the lock, blocking if necessary - * until available. - * - * @return a stamp that can be used to unlock or convert mode - */ - public long readLock() { - long s = this.state, next; // bypass acquireRead on common uncontended case - return this.whead == this.wtail && (s & ABITS) < RFULL && - U.compareAndSwapLong(this, STATE, s, next = s + RUNIT) ? - next : acquireRead(false, 0L); - } - - /** - * Non-exclusively acquires the lock if it is immediately available. - * - * @return a stamp that can be used to unlock or convert mode, - * or zero if the lock is not available - */ - public long tryReadLock() { - for (;;) { - long s, m, next; - if ((m = (s = this.state) & ABITS) == WBIT) { - return 0L; - } else if (m < RFULL) { - if (U.compareAndSwapLong(this, STATE, s, next = s + RUNIT)) { - return next; - } - } - else if ((next = tryIncReaderOverflow(s)) != 0L) { - return next; - } - } - } - - /** - * Non-exclusively acquires the lock if it is available within the - * given time and the current thread has not been interrupted. 
- * Behavior under timeout and interruption matches that specified - * for method {@link Lock#tryLock(long,TimeUnit)}. - * - * @param time the maximum time to wait for the lock - * @param unit the time unit of the {@code time} argument - * @return a stamp that can be used to unlock or convert mode, - * or zero if the lock is not available - * @throws InterruptedException if the current thread is interrupted - * before acquiring the lock - */ - public long tryReadLock(long time, TimeUnit unit) - throws InterruptedException { - long s, m, next, deadline; - long nanos = unit.toNanos(time); - if (!Thread.interrupted()) { - if ((m = (s = this.state) & ABITS) != WBIT) { - if (m < RFULL) { - if (U.compareAndSwapLong(this, STATE, s, next = s + RUNIT)) { - return next; - } - } - else if ((next = tryIncReaderOverflow(s)) != 0L) { - return next; - } - } - if (nanos <= 0L) { - return 0L; - } - if ((deadline = System.nanoTime() + nanos) == 0L) { - deadline = 1L; - } - if ((next = acquireRead(true, deadline)) != INTERRUPTED) { - return next; - } - } - throw new InterruptedException(); - } - - /** - * Non-exclusively acquires the lock, blocking if necessary - * until available or the current thread is interrupted. - * Behavior under interruption matches that specified - * for method {@link Lock#lockInterruptibly()}. - * - * @return a stamp that can be used to unlock or convert mode - * @throws InterruptedException if the current thread is interrupted - * before acquiring the lock - */ - public long readLockInterruptibly() throws InterruptedException { - long next; - if (!Thread.interrupted() && - (next = acquireRead(true, 0L)) != INTERRUPTED) { - return next; - } - throw new InterruptedException(); - } - - /** - * Returns a stamp that can later be validated, or zero - * if exclusively locked. - * - * @return a stamp, or zero if exclusively locked - */ - public long tryOptimisticRead() { - long s; - return ((s = this.state) & WBIT) == 0L ? s & SBITS : 0L; - } - - /** - * Returns true if the lock has not been exclusively acquired - * since issuance of the given stamp. Always returns false if the - * stamp is zero. Always returns true if the stamp represents a - * currently held lock. Invoking this method with a value not - * obtained from {@link #tryOptimisticRead} or a locking method - * for this lock has no defined effect or result. - * - * @param stamp a stamp - * @return {@code true} if the lock has not been exclusively acquired - * since issuance of the given stamp; else false - */ - public boolean validate(long stamp) { - // See above about current use of getLongVolatile here - return (stamp & SBITS) == (U.getLongVolatile(this, STATE) & SBITS); - } - - /** - * If the lock state matches the given stamp, releases the - * exclusive lock. - * - * @param stamp a stamp returned by a write-lock operation - * @throws IllegalMonitorStateException if the stamp does - * not match the current state of this lock - */ - public void unlockWrite(long stamp) { - WNode h; - if (this.state != stamp || (stamp & WBIT) == 0L) { - throw new IllegalMonitorStateException(); - } - this.state = (stamp += WBIT) == 0L ? ORIGIN : stamp; - if ((h = this.whead) != null && h.status != 0) { - release(h); - } - } - - /** - * If the lock state matches the given stamp, releases the - * non-exclusive lock. 
- * - * @param stamp a stamp returned by a read-lock operation - * @throws IllegalMonitorStateException if the stamp does - * not match the current state of this lock - */ - public void unlockRead(long stamp) { - long s, m; WNode h; - for (;;) { - if (((s = this.state) & SBITS) != (stamp & SBITS) || - (stamp & ABITS) == 0L || (m = s & ABITS) == 0L || m == WBIT) { - throw new IllegalMonitorStateException(); - } - if (m < RFULL) { - if (U.compareAndSwapLong(this, STATE, s, s - RUNIT)) { - if (m == RUNIT && (h = this.whead) != null && h.status != 0) { - release(h); - } - break; - } - } - else if (tryDecReaderOverflow(s) != 0L) { - break; - } - } - } - - /** - * If the lock state matches the given stamp, releases the - * corresponding mode of the lock. - * - * @param stamp a stamp returned by a lock operation - * @throws IllegalMonitorStateException if the stamp does - * not match the current state of this lock - */ - public void unlock(long stamp) { - long a = stamp & ABITS, m, s; WNode h; - while (((s = this.state) & SBITS) == (stamp & SBITS)) { - if ((m = s & ABITS) == 0L) { - break; - } else if (m == WBIT) { - if (a != m) { - break; - } - this.state = (s += WBIT) == 0L ? ORIGIN : s; - if ((h = this.whead) != null && h.status != 0) { - release(h); - } - return; - } - else if (a == 0L || a >= WBIT) { - break; - } else if (m < RFULL) { - if (U.compareAndSwapLong(this, STATE, s, s - RUNIT)) { - if (m == RUNIT && (h = this.whead) != null && h.status != 0) { - release(h); - } - return; - } - } - else if (tryDecReaderOverflow(s) != 0L) { - return; - } - } - throw new IllegalMonitorStateException(); - } - - /** - * If the lock state matches the given stamp, performs one of - * the following actions. If the stamp represents holding a write - * lock, returns it. Or, if a read lock, if the write lock is - * available, releases the read lock and returns a write stamp. - * Or, if an optimistic read, returns a write stamp only if - * immediately available. This method returns zero in all other - * cases. - * - * @param stamp a stamp - * @return a valid write stamp, or zero on failure - */ - public long tryConvertToWriteLock(long stamp) { - long a = stamp & ABITS, m, s, next; - while (((s = this.state) & SBITS) == (stamp & SBITS)) { - if ((m = s & ABITS) == 0L) { - if (a != 0L) { - break; - } - if (U.compareAndSwapLong(this, STATE, s, next = s + WBIT)) { - return next; - } - } - else if (m == WBIT) { - if (a != m) { - break; - } - return stamp; - } - else if (m == RUNIT && a != 0L) { - if (U.compareAndSwapLong(this, STATE, s, - next = s - RUNIT + WBIT)) { - return next; - } - } else { - break; - } - } - return 0L; - } - - /** - * If the lock state matches the given stamp, performs one of - * the following actions. If the stamp represents holding a write - * lock, releases it and obtains a read lock. Or, if a read lock, - * returns it. Or, if an optimistic read, acquires a read lock and - * returns a read stamp only if immediately available. This method - * returns zero in all other cases. 
- * - * @param stamp a stamp - * @return a valid read stamp, or zero on failure - */ - public long tryConvertToReadLock(long stamp) { - long a = stamp & ABITS, m, s, next; WNode h; - while (((s = this.state) & SBITS) == (stamp & SBITS)) { - if ((m = s & ABITS) == 0L) { - if (a != 0L) { - break; - } else if (m < RFULL) { - if (U.compareAndSwapLong(this, STATE, s, next = s + RUNIT)) { - return next; - } - } - else if ((next = tryIncReaderOverflow(s)) != 0L) { - return next; - } - } - else if (m == WBIT) { - if (a != m) { - break; - } - this.state = next = s + WBIT + RUNIT; - if ((h = this.whead) != null && h.status != 0) { - release(h); - } - return next; - } - else if (a != 0L && a < WBIT) { - return stamp; - } else { - break; - } - } - return 0L; - } - - /** - * If the lock state matches the given stamp then, if the stamp - * represents holding a lock, releases it and returns an - * observation stamp. Or, if an optimistic read, returns it if - * validated. This method returns zero in all other cases, and so - * may be useful as a form of "tryUnlock". - * - * @param stamp a stamp - * @return a valid optimistic read stamp, or zero on failure - */ - public long tryConvertToOptimisticRead(long stamp) { - long a = stamp & ABITS, m, s, next; WNode h; - for (;;) { - s = U.getLongVolatile(this, STATE); // see above - if (((s = this.state) & SBITS) != (stamp & SBITS)) { - break; - } - if ((m = s & ABITS) == 0L) { - if (a != 0L) { - break; - } - return s; - } - else if (m == WBIT) { - if (a != m) { - break; - } - this.state = next = (s += WBIT) == 0L ? ORIGIN : s; - if ((h = this.whead) != null && h.status != 0) { - release(h); - } - return next; - } - else if (a == 0L || a >= WBIT) { - break; - } else if (m < RFULL) { - if (U.compareAndSwapLong(this, STATE, s, next = s - RUNIT)) { - if (m == RUNIT && (h = this.whead) != null && h.status != 0) { - release(h); - } - return next & SBITS; - } - } - else if ((next = tryDecReaderOverflow(s)) != 0L) { - return next & SBITS; - } - } - return 0L; - } - - /** - * Releases the write lock if it is held, without requiring a - * stamp value. This method may be useful for recovery after - * errors. - * - * @return {@code true} if the lock was held, else false - */ - public boolean tryUnlockWrite() { - long s; WNode h; - if (((s = this.state) & WBIT) != 0L) { - this.state = (s += WBIT) == 0L ? ORIGIN : s; - if ((h = this.whead) != null && h.status != 0) { - release(h); - } - return true; - } - return false; - } - - /** - * Releases one hold of the read lock if it is held, without - * requiring a stamp value. This method may be useful for recovery - * after errors. - * - * @return {@code true} if the read lock was held, else false - */ - public boolean tryUnlockRead() { - long s, m; WNode h; - while ((m = (s = this.state) & ABITS) != 0L && m < WBIT) { - if (m < RFULL) { - if (U.compareAndSwapLong(this, STATE, s, s - RUNIT)) { - if (m == RUNIT && (h = this.whead) != null && h.status != 0) { - release(h); - } - return true; - } - } - else if (tryDecReaderOverflow(s) != 0L) { - return true; - } - } - return false; - } - - // status monitoring methods - - /** - * Returns combined state-held and overflow read count for given - * state s. - */ - private int getReadLockCount(long s) { - long readers; - if ((readers = s & RBITS) >= RFULL) { - readers = RFULL + this.readerOverflow; - } - return (int) readers; - } - - /** - * Returns {@code true} if the lock is currently held exclusively. 
- * - * @return {@code true} if the lock is currently held exclusively - */ - public boolean isWriteLocked() { - return (this.state & WBIT) != 0L; - } - - /** - * Returns {@code true} if the lock is currently held non-exclusively. - * - * @return {@code true} if the lock is currently held non-exclusively - */ - public boolean isReadLocked() { - return (this.state & RBITS) != 0L; - } - - /** - * Queries the number of read locks held for this lock. This - * method is designed for use in monitoring system state, not for - * synchronization control. - * @return the number of read locks held - */ - public int getReadLockCount() { - return getReadLockCount(this.state); - } - - /** - * Returns a string identifying this lock, as well as its lock - * state. The state, in brackets, includes the String {@code - * "Unlocked"} or the String {@code "Write-locked"} or the String - * {@code "Read-locks:"} followed by the current number of - * read-locks held. - * - * @return a string identifying this lock, as well as its lock state - */ - @Override - public String toString() { - long s = this.state; - return super.toString() + - ((s & ABITS) == 0L ? "[Unlocked]" : - (s & WBIT) != 0L ? "[Write-locked]" : - "[Read-locks:" + getReadLockCount(s) + "]"); - } - - // views - - /** - * Returns a plain {@link Lock} view of this StampedLock in which - * the {@link Lock#lock} method is mapped to {@link #readLock}, - * and similarly for other methods. The returned Lock does not - * support a {@link Condition}; method {@link - * Lock#newCondition()} throws {@code - * UnsupportedOperationException}. - * - * @return the lock - */ - public Lock asReadLock() { - ReadLockView v; - return (v = this.readLockView) != null ? v : - (this.readLockView = new ReadLockView()); - } - - /** - * Returns a plain {@link Lock} view of this StampedLock in which - * the {@link Lock#lock} method is mapped to {@link #writeLock}, - * and similarly for other methods. The returned Lock does not - * support a {@link Condition}; method {@link - * Lock#newCondition()} throws {@code - * UnsupportedOperationException}. - * - * @return the lock - */ - public Lock asWriteLock() { - WriteLockView v; - return (v = this.writeLockView) != null ? v : - (this.writeLockView = new WriteLockView()); - } - - /** - * Returns a {@link ReadWriteLock} view of this StampedLock in - * which the {@link ReadWriteLock#readLock()} method is mapped to - * {@link #asReadLock()}, and {@link ReadWriteLock#writeLock()} to - * {@link #asWriteLock()}. - * - * @return the lock - */ - public ReadWriteLock asReadWriteLock() { - ReadWriteLockView v; - return (v = this.readWriteLockView) != null ? 
v : - (this.readWriteLockView = new ReadWriteLockView()); - } - - // view classes - - final class ReadLockView implements Lock { - @Override - public void lock() { readLock(); } - @Override - public void lockInterruptibly() throws InterruptedException { - readLockInterruptibly(); - } - @Override - public boolean tryLock() { return tryReadLock() != 0L; } - @Override - public boolean tryLock(long time, TimeUnit unit) - throws InterruptedException { - return tryReadLock(time, unit) != 0L; - } - @Override - public void unlock() { unstampedUnlockRead(); } - @Override - public Condition newCondition() { - throw new UnsupportedOperationException(); - } - } - - final class WriteLockView implements Lock { - @Override - public void lock() { writeLock(); } - @Override - public void lockInterruptibly() throws InterruptedException { - writeLockInterruptibly(); - } - @Override - public boolean tryLock() { return tryWriteLock() != 0L; } - @Override - public boolean tryLock(long time, TimeUnit unit) - throws InterruptedException { - return tryWriteLock(time, unit) != 0L; - } - @Override - public void unlock() { unstampedUnlockWrite(); } - @Override - public Condition newCondition() { - throw new UnsupportedOperationException(); - } - } - - final class ReadWriteLockView implements ReadWriteLock { - @Override - public Lock readLock() { return asReadLock(); } - @Override - public Lock writeLock() { return asWriteLock(); } - } - - // Unlock methods without stamp argument checks for view classes. - // Needed because view-class lock methods throw away stamps. - - final void unstampedUnlockWrite() { - WNode h; long s; - if (((s = this.state) & WBIT) == 0L) { - throw new IllegalMonitorStateException(); - } - this.state = (s += WBIT) == 0L ? ORIGIN : s; - if ((h = this.whead) != null && h.status != 0) { - release(h); - } - } - - final void unstampedUnlockRead() { - for (;;) { - long s, m; WNode h; - if ((m = (s = this.state) & ABITS) == 0L || m >= WBIT) { - throw new IllegalMonitorStateException(); - } else if (m < RFULL) { - if (U.compareAndSwapLong(this, STATE, s, s - RUNIT)) { - if (m == RUNIT && (h = this.whead) != null && h.status != 0) { - release(h); - } - break; - } - } - else if (tryDecReaderOverflow(s) != 0L) { - break; - } - } - } - - private void readObject(java.io.ObjectInputStream s) - throws java.io.IOException, ClassNotFoundException { - s.defaultReadObject(); - this.state = ORIGIN; // reset to unlocked state - } - - // internals - - /** - * Tries to increment readerOverflow by first setting state - * access bits value to RBITS, indicating hold of spinlock, - * then updating, then releasing. - * - * @param s a reader overflow stamp: (s & ABITS) >= RFULL - * @return new stamp on success, else zero - */ - private long tryIncReaderOverflow(long s) { - // assert (s & ABITS) >= RFULL; - if ((s & ABITS) == RFULL) { - if (U.compareAndSwapLong(this, STATE, s, s | RBITS)) { - ++this.readerOverflow; - this.state = s; - return s; - } - } - else if ((randomThreadLocal.get().nextInt() & - OVERFLOW_YIELD_RATE) == 0) { - Thread.yield(); - } - return 0L; - } - - /** - * Tries to decrement readerOverflow. 
- * - * @param s a reader overflow stamp: (s & ABITS) >= RFULL - * @return new stamp on success, else zero - */ - private long tryDecReaderOverflow(long s) { - // assert (s & ABITS) >= RFULL; - if ((s & ABITS) == RFULL) { - if (U.compareAndSwapLong(this, STATE, s, s | RBITS)) { - int r; long next; - if ((r = this.readerOverflow) > 0) { - this.readerOverflow = r - 1; - next = s; - } else { - next = s - RUNIT; - } - this.state = next; - return next; - } - } - else if ((randomThreadLocal.get().nextInt() & - OVERFLOW_YIELD_RATE) == 0) { - Thread.yield(); - } - return 0L; - } - - /** - * Wakes up the successor of h (normally whead). This is normally - * just h.next, but may require traversal from wtail if next - * pointers are lagging. This may fail to wake up an acquiring - * thread when one or more have been cancelled, but the cancel - * methods themselves provide extra safeguards to ensure liveness. - */ - private void release(WNode h) { - if (h != null) { - WNode q; Thread w; - U.compareAndSwapInt(h, WSTATUS, WAITING, 0); - if ((q = h.next) == null || q.status == CANCELLED) { - for (WNode t = this.wtail; t != null && t != h; t = t.prev) { - if (t.status <= 0) { - q = t; - } - } - } - if (q != null && (w = q.thread) != null) { - U.unpark(w); - } - } - } - - /** - * See above for explanation. - * - * @param interruptible true if should check interrupts and if so - * return INTERRUPTED - * @param deadline if nonzero, the System.nanoTime value to timeout - * at (and return zero) - * @return next state, or INTERRUPTED - */ - private long acquireWrite(boolean interruptible, long deadline) { - WNode node = null, p; - for (int spins = -1;;) { // spin while enqueuing - long m, s, ns; - if ((m = (s = this.state) & ABITS) == 0L) { - if (U.compareAndSwapLong(this, STATE, s, ns = s + WBIT)) { - return ns; - } - } - else if (spins < 0) { - spins = m == WBIT && this.wtail == this.whead ? 
SPINS : 0; - } else if (spins > 0) { - if (randomThreadLocal.get().nextInt() >= 0) { - --spins; - } - } - else if ((p = this.wtail) == null) { // initialize queue - WNode hd = new WNode(WMODE, null); - if (U.compareAndSwapObject(this, WHEAD, null, hd)) { - this.wtail = hd; - } - } - else if (node == null) { - node = new WNode(WMODE, p); - } else if (node.prev != p) { - node.prev = p; - } else if (U.compareAndSwapObject(this, WTAIL, p, node)) { - p.next = node; - break; - } - } - - for (int spins = -1;;) { - WNode h, np, pp; int ps; - if ((h = this.whead) == p) { - if (spins < 0) { - spins = HEAD_SPINS; - } else if (spins < MAX_HEAD_SPINS) { - spins <<= 1; - } - for (int k = spins;;) { // spin at head - long s, ns; - if (((s = this.state) & ABITS) == 0L) { - if (U.compareAndSwapLong(this, STATE, s, - ns = s + WBIT)) { - this.whead = node; - node.prev = null; - return ns; - } - } - else if (randomThreadLocal.get().nextInt() >= 0 && - --k <= 0) { - break; - } - } - } - else if (h != null) { // help release stale waiters - WNode c; Thread w; - while ((c = h.cowait) != null) { - if (U.compareAndSwapObject(h, WCOWAIT, c, c.cowait) && - (w = c.thread) != null) { - U.unpark(w); - } - } - } - if (this.whead == h) { - if ((np = node.prev) != p) { - if (np != null) - { - (p = np).next = node; // stale - } - } - else if ((ps = p.status) == 0) { - U.compareAndSwapInt(p, WSTATUS, 0, WAITING); - } else if (ps == CANCELLED) { - if ((pp = p.prev) != null) { - node.prev = pp; - pp.next = node; - } - } - else { - long time; // 0 argument to park means no timeout - if (deadline == 0L) { - time = 0L; - } else if ((time = deadline - System.nanoTime()) <= 0L) { - return cancelWaiter(node, node, false); - } - Thread wt = Thread.currentThread(); - U.putObject(wt, PARKBLOCKER, this); - node.thread = wt; - if (p.status < 0 && (p != h || (this.state & ABITS) != 0L) && - this.whead == h && node.prev == p) - { - U.park(false, time); // emulate LockSupport.park - } - node.thread = null; - U.putObject(wt, PARKBLOCKER, null); - if (interruptible && Thread.interrupted()) { - return cancelWaiter(node, node, true); - } - } - } - } - } - - /** - * See above for explanation. - * - * @param interruptible true if should check interrupts and if so - * return INTERRUPTED - * @param deadline if nonzero, the System.nanoTime value to timeout - * at (and return zero) - * @return next state, or INTERRUPTED - */ - private long acquireRead(boolean interruptible, long deadline) { - WNode node = null, p; - for (int spins = -1;;) { - WNode h; - if ((h = this.whead) == (p = this.wtail)) { - for (long m, s, ns;;) { - if ((m = (s = this.state) & ABITS) < RFULL ? 
- U.compareAndSwapLong(this, STATE, s, ns = s + RUNIT) : - m < WBIT && (ns = tryIncReaderOverflow(s)) != 0L) { - return ns; - } else if (m >= WBIT) { - if (spins > 0) { - if (randomThreadLocal.get().nextInt() >= 0) { - --spins; - } - } - else { - if (spins == 0) { - WNode nh = this.whead, np = this.wtail; - if (nh == h && np == p || (h = nh) != (p = np)) { - break; - } - } - spins = SPINS; - } - } - } - } - if (p == null) { // initialize queue - WNode hd = new WNode(WMODE, null); - if (U.compareAndSwapObject(this, WHEAD, null, hd)) { - this.wtail = hd; - } - } - else if (node == null) { - node = new WNode(RMODE, p); - } else if (h == p || p.mode != RMODE) { - if (node.prev != p) { - node.prev = p; - } else if (U.compareAndSwapObject(this, WTAIL, p, node)) { - p.next = node; - break; - } - } - else if (!U.compareAndSwapObject(p, WCOWAIT, - node.cowait = p.cowait, node)) { - node.cowait = null; - } else { - for (;;) { - WNode pp, c; Thread w; - if ((h = this.whead) != null && (c = h.cowait) != null && - U.compareAndSwapObject(h, WCOWAIT, c, c.cowait) && - (w = c.thread) != null) { - U.unpark(w); - } - if (h == (pp = p.prev) || h == p || pp == null) { - long m, s, ns; - do { - if ((m = (s = this.state) & ABITS) < RFULL ? - U.compareAndSwapLong(this, STATE, s, - ns = s + RUNIT) : - m < WBIT && - (ns = tryIncReaderOverflow(s)) != 0L) { - return ns; - } - } while (m < WBIT); - } - if (this.whead == h && p.prev == pp) { - long time; - if (pp == null || h == p || p.status > 0) { - node = null; // throw away - break; - } - if (deadline == 0L) { - time = 0L; - } else if ((time = deadline - System.nanoTime()) <= 0L) { - return cancelWaiter(node, p, false); - } - Thread wt = Thread.currentThread(); - U.putObject(wt, PARKBLOCKER, this); - node.thread = wt; - if ((h != pp || (this.state & ABITS) == WBIT) && - this.whead == h && p.prev == pp) { - U.park(false, time); - } - node.thread = null; - U.putObject(wt, PARKBLOCKER, null); - if (interruptible && Thread.interrupted()) { - return cancelWaiter(node, p, true); - } - } - } - } - } - - for (int spins = -1;;) { - WNode h, np, pp; int ps; - if ((h = this.whead) == p) { - if (spins < 0) { - spins = HEAD_SPINS; - } else if (spins < MAX_HEAD_SPINS) { - spins <<= 1; - } - for (int k = spins;;) { // spin at head - long m, s, ns; - if ((m = (s = this.state) & ABITS) < RFULL ? 
- U.compareAndSwapLong(this, STATE, s, ns = s + RUNIT) : - m < WBIT && (ns = tryIncReaderOverflow(s)) != 0L) { - WNode c; Thread w; - this.whead = node; - node.prev = null; - while ((c = node.cowait) != null) { - if (U.compareAndSwapObject(node, WCOWAIT, - c, c.cowait) && - (w = c.thread) != null) { - U.unpark(w); - } - } - return ns; - } - else if (m >= WBIT && - randomThreadLocal.get().nextInt() >= 0 && --k <= 0) { - break; - } - } - } - else if (h != null) { - WNode c; Thread w; - while ((c = h.cowait) != null) { - if (U.compareAndSwapObject(h, WCOWAIT, c, c.cowait) && - (w = c.thread) != null) { - U.unpark(w); - } - } - } - if (this.whead == h) { - if ((np = node.prev) != p) { - if (np != null) - { - (p = np).next = node; // stale - } - } - else if ((ps = p.status) == 0) { - U.compareAndSwapInt(p, WSTATUS, 0, WAITING); - } else if (ps == CANCELLED) { - if ((pp = p.prev) != null) { - node.prev = pp; - pp.next = node; - } - } - else { - long time; - if (deadline == 0L) { - time = 0L; - } else if ((time = deadline - System.nanoTime()) <= 0L) { - return cancelWaiter(node, node, false); - } - Thread wt = Thread.currentThread(); - U.putObject(wt, PARKBLOCKER, this); - node.thread = wt; - if (p.status < 0 && - (p != h || (this.state & ABITS) == WBIT) && - this.whead == h && node.prev == p) { - U.park(false, time); - } - node.thread = null; - U.putObject(wt, PARKBLOCKER, null); - if (interruptible && Thread.interrupted()) { - return cancelWaiter(node, node, true); - } - } - } - } - } - - /** - * If node non-null, forces cancel status and unsplices it from - * queue if possible and wakes up any cowaiters (of the node, or - * group, as applicable), and in any case helps release current - * first waiter if lock is free. (Calling with null arguments - * serves as a conditional form of release, which is not currently - * needed but may be needed under possible future cancellation - * policies). This is a variant of cancellation methods in - * AbstractQueuedSynchronizer (see its detailed explanation in AQS - * internal documentation). 
- * - * @param node if nonnull, the waiter - * @param group either node or the group node is cowaiting with - * @param interrupted if already interrupted - * @return INTERRUPTED if interrupted or Thread.interrupted, else zero - */ - private long cancelWaiter(WNode node, WNode group, boolean interrupted) { - if (node != null && group != null) { - Thread w; - node.status = CANCELLED; - // unsplice cancelled nodes from group - for (WNode p = group, q; (q = p.cowait) != null;) { - if (q.status == CANCELLED) { - U.compareAndSwapObject(p, WCOWAIT, q, q.cowait); - p = group; // restart - } else { - p = q; - } - } - if (group == node) { - for (WNode r = group.cowait; r != null; r = r.cowait) { - if ((w = r.thread) != null) - { - U.unpark(w); // wake up uncancelled co-waiters - } - } - for (WNode pred = node.prev; pred != null; ) { // unsplice - WNode succ, pp; // find valid successor - while ((succ = node.next) == null || - succ.status == CANCELLED) { - WNode q = null; // find successor the slow way - for (WNode t = this.wtail; t != null && t != node; t = t.prev) - { - if (t.status != CANCELLED) - { - q = t; // don't link if succ cancelled - } - } - if (succ == q || // ensure accurate successor - U.compareAndSwapObject(node, WNEXT, - succ, succ = q)) { - if (succ == null && node == this.wtail) { - U.compareAndSwapObject(this, WTAIL, node, pred); - } - break; - } - } - if (pred.next == node) { - U.compareAndSwapObject(pred, WNEXT, node, succ); - } - if (succ != null && (w = succ.thread) != null) { - succ.thread = null; - U.unpark(w); // wake up succ to observe new pred - } - if (pred.status != CANCELLED || (pp = pred.prev) == null) { - break; - } - node.prev = pp; // repeat if new pred wrong/cancelled - U.compareAndSwapObject(pp, WNEXT, pred, succ); - pred = pp; - } - } - } - WNode h; // Possibly release first waiter - while ((h = this.whead) != null) { - long s; WNode q; // similar to release() but check eligibility - if ((q = h.next) == null || q.status == CANCELLED) { - for (WNode t = this.wtail; t != null && t != h; t = t.prev) { - if (t.status <= 0) { - q = t; - } - } - } - if (h == this.whead) { - if (q != null && h.status == 0 && - ((s = this.state) & ABITS) != WBIT && // waiter is eligible - (s == 0L || q.mode == RMODE)) { - release(h); - } - break; - } - } - return interrupted || Thread.interrupted() ? INTERRUPTED : 0L; - } - - // Unsafe mechanics - private static final sun.misc.Unsafe U; - private static final long STATE; - private static final long WHEAD; - private static final long WTAIL; - private static final long WNEXT; - private static final long WSTATUS; - private static final long WCOWAIT; - private static final long PARKBLOCKER; - - static { - try { - U = getUnsafe(); - Class k = StampedLock.class; - Class wk = WNode.class; - STATE = U.objectFieldOffset - (k.getDeclaredField("state")); - WHEAD = U.objectFieldOffset - (k.getDeclaredField("whead")); - WTAIL = U.objectFieldOffset - (k.getDeclaredField("wtail")); - WSTATUS = U.objectFieldOffset - (wk.getDeclaredField("status")); - WNEXT = U.objectFieldOffset - (wk.getDeclaredField("next")); - WCOWAIT = U.objectFieldOffset - (wk.getDeclaredField("cowait")); - Class tk = Thread.class; - PARKBLOCKER = U.objectFieldOffset - (tk.getDeclaredField("parkBlocker")); - - } catch (Exception e) { - throw new Error(e); - } - } - - /** - * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. - * Replace with a simple call to Unsafe.getUnsafe when integrating - * into a jdk. 
- * - * @return a sun.misc.Unsafe - */ - private static sun.misc.Unsafe getUnsafe() { - try { - return sun.misc.Unsafe.getUnsafe(); - } catch (SecurityException tryReflectionInstead) {} - try { - return java.security.AccessController.doPrivileged - (new java.security.PrivilegedExceptionAction() { - @Override - public sun.misc.Unsafe run() throws Exception { - Class k = sun.misc.Unsafe.class; - for (java.lang.reflect.Field f : k.getDeclaredFields()) { - f.setAccessible(true); - Object x = f.get(null); - if (k.isInstance(x)) { - return k.cast(x); - } - } - throw new NoSuchFieldError("the Unsafe"); - }}); - } catch (java.security.PrivilegedActionException e) { - throw new RuntimeException("Could not initialize intrinsics", - e.getCause()); - } - } -} diff --git a/src/dorkbox/util/messagebus/common/thread/ConcurrentSet.java b/src/dorkbox/util/messagebus/common/thread/ConcurrentSet.java index a9fc857..865d2e3 100644 --- a/src/dorkbox/util/messagebus/common/thread/ConcurrentSet.java +++ b/src/dorkbox/util/messagebus/common/thread/ConcurrentSet.java @@ -15,12 +15,11 @@ */ package dorkbox.util.messagebus.common.thread; -import dorkbox.util.messagebus.common.adapter.JavaVersionAdapter; - import java.util.Collection; import java.util.Iterator; import java.util.Map; import java.util.NoSuchElementException; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicLong; @@ -49,7 +48,7 @@ class ConcurrentSet extends ConcurrentLinkedQueue2 { public ConcurrentSet(int size, float loadFactor, int stripeSize) { super(); - this.entries = JavaVersionAdapter.concurrentMap(size, loadFactor, 32); + this.entries = new ConcurrentHashMap<>(size, loadFactor, stripeSize); } @Override diff --git a/src/dorkbox/util/messagebus/publication/PublisherExactWithSuperTypes.java b/src/dorkbox/util/messagebus/publication/PublisherExactWithSuperTypes.java index bbbbf3b..178b336 100644 --- a/src/dorkbox/util/messagebus/publication/PublisherExactWithSuperTypes.java +++ b/src/dorkbox/util/messagebus/publication/PublisherExactWithSuperTypes.java @@ -39,6 +39,7 @@ class PublisherExactWithSuperTypes implements Publisher { public void publish(final Synchrony synchrony, final Object message1) { try { + final SubscriptionManager subManager = this.subManager; final Class messageClass = message1.getClass(); final Subscription[] subscriptions = subManager.getExactAndSuper(messageClass); // can return null diff --git a/src/dorkbox/util/messagebus/subscription/SubscriptionFactory.java b/src/dorkbox/util/messagebus/subscription/SubscriptionFactory.java new file mode 100644 index 0000000..21a1b66 --- /dev/null +++ b/src/dorkbox/util/messagebus/subscription/SubscriptionFactory.java @@ -0,0 +1,20 @@ +package dorkbox.util.messagebus.subscription; + +import com.lmax.disruptor.EventFactory; + +/** + * @author dorkbox, llc + * Date: 1/15/16 + */ +public class SubscriptionFactory implements EventFactory { + + public + SubscriptionFactory() { + } + + @Override + public + SubscriptionHolder newInstance() { + return new SubscriptionHolder(); + } +} diff --git a/src/dorkbox/util/messagebus/subscription/SubscriptionHandler.java b/src/dorkbox/util/messagebus/subscription/SubscriptionHandler.java new file mode 100644 index 0000000..3a94322 --- /dev/null +++ b/src/dorkbox/util/messagebus/subscription/SubscriptionHandler.java @@ -0,0 +1,49 @@ +package dorkbox.util.messagebus.subscription; + +import com.lmax.disruptor.LifecycleAware; +import com.lmax.disruptor.WorkHandler; + +import 
java.util.concurrent.atomic.AtomicBoolean; + +/** + * @author dorkbox, llc + * Date: 2/2/15 + */ +public +class SubscriptionHandler implements WorkHandler, LifecycleAware { + private final SubscriptionManager subscriptionManager; + + AtomicBoolean shutdown = new AtomicBoolean(false); + + public + SubscriptionHandler(final SubscriptionManager subscriptionManager) { + this.subscriptionManager = subscriptionManager; + } + + @Override + public + void onEvent(final SubscriptionHolder event) throws Exception { + if (event.doSubscribe) { + subscriptionManager.subscribe(event.listener); + } + else { + subscriptionManager.unsubscribe(event.listener); + } + } + + @Override + public + void onStart() { + } + + @Override + public synchronized + void onShutdown() { + shutdown.set(true); + } + + public + boolean isShutdown() { + return shutdown.get(); + } +} diff --git a/src/dorkbox/util/messagebus/subscription/SubscriptionHolder.java b/src/dorkbox/util/messagebus/subscription/SubscriptionHolder.java new file mode 100644 index 0000000..e95b994 --- /dev/null +++ b/src/dorkbox/util/messagebus/subscription/SubscriptionHolder.java @@ -0,0 +1,13 @@ +package dorkbox.util.messagebus.subscription; + +/** + * @author dorkbox, llc Date: 1/15/16 + */ +public +class SubscriptionHolder { + public boolean doSubscribe; + public Object listener; + + public + SubscriptionHolder() {} +} diff --git a/src/dorkbox/util/messagebus/subscription/SubscriptionManager.java b/src/dorkbox/util/messagebus/subscription/SubscriptionManager.java index d0c9d3d..edeac46 100644 --- a/src/dorkbox/util/messagebus/subscription/SubscriptionManager.java +++ b/src/dorkbox/util/messagebus/subscription/SubscriptionManager.java @@ -17,7 +17,6 @@ package dorkbox.util.messagebus.subscription; import dorkbox.util.messagebus.common.HashMapTree; import dorkbox.util.messagebus.common.MessageHandler; -import dorkbox.util.messagebus.common.adapter.JavaVersionAdapter; import dorkbox.util.messagebus.error.ErrorHandlingSupport; import dorkbox.util.messagebus.utils.ClassUtils; import dorkbox.util.messagebus.utils.SubscriptionUtils; @@ -26,6 +25,7 @@ import dorkbox.util.messagebus.utils.VarArgUtils; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicBoolean; @@ -45,13 +45,15 @@ public final class SubscriptionManager { public static final float LOAD_FACTOR = 0.8F; + // TODO: during startup, precalculate the number of subscription listeners and x2 to save as subsPerListener expected max size + + // ONLY used by SUB/UNSUB // remember already processed classes that do not contain any message handlers - private final Map, Boolean> nonListeners; + private final ConcurrentMap, Boolean> nonListeners; // all subscriptions per messageHandler type // this map provides fast access for subscribing and unsubscribing - // write access is synchronized and happens very infrequently // once a collection of subscriptions is stored it does not change private final ConcurrentMap, Subscription[]> subscriptionsPerListener; @@ -69,15 +71,8 @@ class SubscriptionManager { private final HashMapTree, ArrayList> subscriptionsPerMessageMulti; // shortcut publication if we know there is no possibility of varArg (ie: a method that has an array as arguments) - final AtomicBoolean varArgPossibility = new AtomicBoolean(false); + private final AtomicBoolean varArgPossibility = new 
AtomicBoolean(false); - ThreadLocal> listCache = new ThreadLocal>() { - @Override - protected - List initialValue() { - return new CopyOnWriteArrayList(); - } - }; private final ClassUtils classUtils; //NOTE for multiArg, can use the memory address concatenated with other ones and then just put it in the 'single" map (convert single to @@ -91,20 +86,18 @@ class SubscriptionManager { classUtils = new ClassUtils(SubscriptionManager.LOAD_FACTOR); // modified ONLY during SUB/UNSUB - this.nonListeners = JavaVersionAdapter.concurrentMap(4, LOAD_FACTOR, numberOfThreads); + this.nonListeners = new ConcurrentHashMap, Boolean>(4, LOAD_FACTOR, numberOfThreads); - // only used during SUB/UNSUB - this.subscriptionsPerListener = JavaVersionAdapter.concurrentMap(32, LOAD_FACTOR, numberOfThreads); + subscriptionsPerListener = new ConcurrentHashMap, Subscription[]>(32, LOAD_FACTOR, numberOfThreads); + subscriptionsPerMessageSingle = new ConcurrentHashMap, List>(32, LOAD_FACTOR, numberOfThreads); - - this.subscriptionsPerMessageSingle = JavaVersionAdapter.concurrentMap(32, LOAD_FACTOR, numberOfThreads); this.subscriptionsPerMessageMulti = new HashMapTree, ArrayList>(); - this.subUtils = new SubscriptionUtils(classUtils, numberOfThreads, LOAD_FACTOR); + this.subUtils = new SubscriptionUtils(classUtils, LOAD_FACTOR, numberOfThreads); // var arg subscriptions keep track of which subscriptions can handle varArgs. SUB/UNSUB dumps it, so it is recreated dynamically. // it's a hit on SUB/UNSUB, but improves performance of handlers - this.varArgUtils = new VarArgUtils(classUtils, numberOfThreads, LOAD_FACTOR); + this.varArgUtils = new VarArgUtils(classUtils, LOAD_FACTOR, numberOfThreads); } public @@ -115,16 +108,12 @@ class SubscriptionManager { this.subscriptionsPerMessageMulti.clear(); this.subscriptionsPerListener.clear(); - this.classUtils.clear(); + this.classUtils.shutdown(); clear(); } public void subscribe(final Object listener) { - if (listener == null) { - return; - } - // when subscribing, this is a GREAT opportunity to figure out the classes/objects loaded -- their hierarchy, AND generate UUIDs // for each CLASS that can be accessed. This then lets us lookup a UUID for each object that comes in -- if an ID is found (for // any part of it's object hierarchy) -- it means that we have that listeners for that object. this is MUCH faster checking if @@ -184,7 +173,6 @@ class SubscriptionManager { - final AtomicBoolean varArgPossibility = this.varArgPossibility; Subscription subscription; MessageHandler messageHandler; @@ -209,18 +197,14 @@ class SubscriptionManager { messageHandlerTypes = messageHandler.getHandledMessages(); handlerType = messageHandlerTypes[0]; - // using ThreadLocal cache's is SIGNIFICANTLY faster for subscribing to new types - final List cachedSubs = listCache.get(); - List subs = subsPerMessageSingle.putIfAbsent(handlerType, cachedSubs); - if (subs == null) { - listCache.set(new CopyOnWriteArrayList()); + if (!subsPerMessageSingle.containsKey(handlerType)) { + subsPerMessageSingle.put(handlerType, new CopyOnWriteArrayList()); } + // create the subscription. 
This can be thrown away if the subscription succeeds in another thread subscription = new Subscription(messageHandler); subscriptions[i] = subscription; - - // now add this subscription to each of the handled types } // now subsPerMessageSingle has a unique list of subscriptions for a specific handlerType, and MAY already have subscriptions @@ -264,11 +248,8 @@ class SubscriptionManager { public void unsubscribe(final Object listener) { - if (listener == null) { - return; - } - final Class listenerClass = listener.getClass(); + if (this.nonListeners.containsKey(listenerClass)) { // early reject of known classes that do not define message handlers return; @@ -302,7 +283,7 @@ class SubscriptionManager { public void clear() { this.subUtils.clear(); - this.varArgUtils.clear(); +// this.varArgUtils.clear(); } // inside a write lock @@ -326,21 +307,21 @@ class SubscriptionManager { return; } case 1: { - // using ThreadLocal cache's is SIGNIFICANTLY faster for subscribing to new types - final List cachedSubs = listCache.get(); - List subs = subsPerMessageSingle.putIfAbsent(type0, cachedSubs); - if (subs == null) { - listCache.set(new CopyOnWriteArrayList()); -// listCache.set(new ArrayList(8)); - subs = cachedSubs; - - // is this handler able to accept var args? - if (handler.getVarArgClass() != null) { - varArgPossibility.lazySet(true); - } - } - - subs.add(subscription); +// // using ThreadLocal cache's is SIGNIFICANTLY faster for subscribing to new types +// final List cachedSubs = listCache.get(); +// List subs = subsPerMessageSingle.putIfAbsent(type0, cachedSubs); +// if (subs == null) { +// listCache.set(new CopyOnWriteArrayList()); +//// listCache.set(new ArrayList(8)); +// subs = cachedSubs; +// +// // is this handler able to accept var args? +// if (handler.getVarArgClass() != null) { +// varArgPossibility.lazySet(true); +// } +// } +// +// subs.add(subscription); return; } case 2: { diff --git a/src/dorkbox/util/messagebus/subscription/WriterDistruptor.java b/src/dorkbox/util/messagebus/subscription/WriterDistruptor.java new file mode 100644 index 0000000..1e87bc2 --- /dev/null +++ b/src/dorkbox/util/messagebus/subscription/WriterDistruptor.java @@ -0,0 +1,159 @@ +package dorkbox.util.messagebus.subscription; + +import com.lmax.disruptor.EventFactory; +import com.lmax.disruptor.LiteBlockingWaitStrategy; +import com.lmax.disruptor.PhasedBackoffWaitStrategy; +import com.lmax.disruptor.RingBuffer; +import com.lmax.disruptor.Sequence; +import com.lmax.disruptor.SequenceBarrier; +import com.lmax.disruptor.Sequencer; +import com.lmax.disruptor.WaitStrategy; +import com.lmax.disruptor.WorkProcessor; +import dorkbox.util.messagebus.common.thread.NamedThreadFactory; +import dorkbox.util.messagebus.error.ErrorHandlingSupport; +import dorkbox.util.messagebus.publication.disruptor.PublicationExceptionHandler; + +import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.LockSupport; + + +/** + * Objective of this class is to conform to the "single writer principle", in order to maintain CLEAN AND SIMPLE concurrency for the + * subscriptions. Even with concurrent hashMaps, there is still locks happening during contention. 
+ */ +public +class WriterDistruptor { + + private WorkProcessor workProcessor; + private SubscriptionHandler handler; + private RingBuffer ringBuffer; + private Sequence workSequence; + + public + WriterDistruptor(final ErrorHandlingSupport errorHandler, final SubscriptionManager subscriptionManager) { + // Now we setup the disruptor and work handlers + + ExecutorService executor = new ThreadPoolExecutor(1, 1, + 0, TimeUnit.NANOSECONDS, // handlers are never idle, so this doesn't matter + new java.util.concurrent.LinkedTransferQueue(), + new NamedThreadFactory("MessageBus-Subscriber")); + + final PublicationExceptionHandler exceptionHandler = new PublicationExceptionHandler(errorHandler); + EventFactory factory = new SubscriptionFactory(); + + // setup the work handlers + handler = new SubscriptionHandler(subscriptionManager); + + +// final int BUFFER_SIZE = ringBufferSize * 64; + final int BUFFER_SIZE = 1024 * 64; +// final int BUFFER_SIZE = 1024; +// final int BUFFER_SIZE = 32; +// final int BUFFER_SIZE = 16; +// final int BUFFER_SIZE = 8; +// final int BUFFER_SIZE = 4; + + + WaitStrategy consumerWaitStrategy; +// consumerWaitStrategy = new LiteBlockingWaitStrategy(); // good one +// consumerWaitStrategy = new BlockingWaitStrategy(); +// consumerWaitStrategy = new YieldingWaitStrategy(); +// consumerWaitStrategy = new BusySpinWaitStrategy(); +// consumerWaitStrategy = new SleepingWaitStrategy(); +// consumerWaitStrategy = new PhasedBackoffWaitStrategy(20, 50, TimeUnit.MILLISECONDS, new SleepingWaitStrategy(0)); +// consumerWaitStrategy = new PhasedBackoffWaitStrategy(20, 50, TimeUnit.MILLISECONDS, new BlockingWaitStrategy()); + consumerWaitStrategy = new PhasedBackoffWaitStrategy(2, 5, TimeUnit.MILLISECONDS, new LiteBlockingWaitStrategy()); + + + ringBuffer = RingBuffer.createMultiProducer(factory, BUFFER_SIZE, consumerWaitStrategy); + SequenceBarrier sequenceBarrier = ringBuffer.newBarrier(); + + + // setup the WorkProcessors (these consume from the ring buffer -- one at a time) and tell the "handler" to execute the item + workSequence = new Sequence(Sequencer.INITIAL_CURSOR_VALUE); + workProcessor = new WorkProcessor(ringBuffer, sequenceBarrier, handler, exceptionHandler, workSequence); + + + // setup the WorkProcessor sequences (control what is consumed from the ring buffer) + final Sequence[] sequences = getSequences(); + ringBuffer.addGatingSequences(sequences); + + + // configure the start position for the WorkProcessors, and start them + final long cursor = ringBuffer.getCursor(); + workSequence.set(cursor); + + workProcessor.getSequence() + .set(cursor); + + executor.execute(workProcessor); + } + + /** + * @param listener is never null + */ + public + void subscribe(final Object listener) { + long seq = ringBuffer.next(); + + SubscriptionHolder job = ringBuffer.get(seq); + job.doSubscribe = true; + job.listener = listener; + + ringBuffer.publish(seq); + } + + /** + * @param listener is never null + */ + public + void unsubscribe(final Object listener) { + long seq = ringBuffer.next(); + + SubscriptionHolder job = ringBuffer.get(seq); + job.doSubscribe = false; + job.listener = listener; + + ringBuffer.publish(seq); + } + + + // gets the sequences used for processing work + private + Sequence[] getSequences() { + final Sequence[] sequences = new Sequence[2]; + sequences[0] = workProcessor.getSequence(); + sequences[1] = workSequence; // always add the work sequence + return sequences; + } + + + public + void start() { + } + + public + void shutdown() { + workProcessor.halt(); + + 
while (!handler.isShutdown()) { + LockSupport.parkNanos(100L); // wait 100ms for handlers to quit + } + } + + public + boolean hasPendingMessages() { + // from workerPool.drainAndHalt() + Sequence[] workerSequences = getSequences(); + final long cursor = ringBuffer.getCursor(); + for (Sequence s : workerSequences) { + if (cursor > s.get()) { + return true; + } + } + + return false; + } +} diff --git a/src/dorkbox/util/messagebus/synchrony/AsyncDisruptor.java b/src/dorkbox/util/messagebus/synchrony/AsyncDisruptor.java index 940f5d6..28b3f8b 100644 --- a/src/dorkbox/util/messagebus/synchrony/AsyncDisruptor.java +++ b/src/dorkbox/util/messagebus/synchrony/AsyncDisruptor.java @@ -77,9 +77,10 @@ class AsyncDisruptor implements Synchrony { // setup the WorkProcessors (these consume from the ring buffer -- one at a time) and tell the "handler" to execute the item + workSequence = new Sequence(Sequencer.INITIAL_CURSOR_VALUE); + final int numWorkers = handlers.length; workProcessors = new WorkProcessor[numWorkers]; - workSequence = new Sequence(Sequencer.INITIAL_CURSOR_VALUE); for (int i = 0; i < numWorkers; i++) { workProcessors[i] = new WorkProcessor(ringBuffer, diff --git a/src/dorkbox/util/messagebus/utils/ClassUtils.java b/src/dorkbox/util/messagebus/utils/ClassUtils.java index c776927..ed15d03 100644 --- a/src/dorkbox/util/messagebus/utils/ClassUtils.java +++ b/src/dorkbox/util/messagebus/utils/ClassUtils.java @@ -15,11 +15,10 @@ */ package dorkbox.util.messagebus.utils; -import dorkbox.util.messagebus.common.adapter.JavaVersionAdapter; - import java.lang.reflect.Array; import java.util.ArrayList; import java.util.HashSet; +import java.util.IdentityHashMap; import java.util.Map; public final @@ -28,26 +27,30 @@ class ClassUtils { private final Map, Class> arrayCache; private final Map, Class[]> superClassesCache; + /** + * These data structures are never reset because the class hierarchy doesn't change at runtime + */ public ClassUtils(final float loadFactor) { - this.arrayCache = JavaVersionAdapter.concurrentMap(32, loadFactor, 1); - this.superClassesCache = JavaVersionAdapter.concurrentMap(32, loadFactor, 1); +// this.arrayCache = JavaVersionAdapter.concurrentMap(32, loadFactor, 1); +// this.superClassesCache = JavaVersionAdapter.concurrentMap(32, loadFactor, 1); + this.arrayCache = new IdentityHashMap, Class>(32); + this.superClassesCache = new IdentityHashMap, Class[]>(32); } /** - * never returns null - * never reset, since it never needs to be reset (as the class hierarchy doesn't change at runtime) - *

* if parameter clazz is of type array, then the super classes are of array type as well - *

- * protected by read lock by caller. The cache version is called first, by write lock + *

+ * race conditions will result in duplicate answers, which we don't care if happens + * never returns null + * never reset */ public Class[] getSuperClasses(final Class clazz) { // this is never reset, since it never needs to be. - final Map, Class[]> local = this.superClassesCache; + final Map, Class[]> cache = this.superClassesCache; - Class[] classes = local.get(clazz); + Class[] classes = cache.get(clazz); if (classes == null) { // publish all super types of class @@ -82,7 +85,7 @@ class ClassUtils { classes = new Class[newList.size()]; newList.toArray(classes); - local.put(clazz, classes); + cache.put(clazz, classes); } return classes; @@ -91,18 +94,18 @@ class ClassUtils { /** * race conditions will result in duplicate answers, which we don't care if happens * never returns null - * never reset + * never resets */ public Class getArrayClass(final Class c) { - final Map, Class> arrayCache = this.arrayCache; - Class clazz = arrayCache.get(c); + final Map, Class> cache = this.arrayCache; + Class clazz = cache.get(c); if (clazz == null) { // messy, but the ONLY way to do it. Array super types are also arrays final Object[] newInstance = (Object[]) Array.newInstance(c, 1); clazz = newInstance.getClass(); - arrayCache.put(c, clazz); + cache.put(c, clazz); } return clazz; @@ -110,10 +113,10 @@ class ClassUtils { /** - * Clears the caches + * Clears the caches, should only be called on shutdown */ public - void clear() { + void shutdown() { this.arrayCache.clear(); this.superClassesCache.clear(); } diff --git a/src/dorkbox/util/messagebus/utils/SubscriptionUtils.java b/src/dorkbox/util/messagebus/utils/SubscriptionUtils.java index fd06957..ea043d4 100644 --- a/src/dorkbox/util/messagebus/utils/SubscriptionUtils.java +++ b/src/dorkbox/util/messagebus/utils/SubscriptionUtils.java @@ -16,13 +16,13 @@ package dorkbox.util.messagebus.utils; import dorkbox.util.messagebus.common.HashMapTree; -import dorkbox.util.messagebus.common.adapter.JavaVersionAdapter; import dorkbox.util.messagebus.subscription.Subscription; import dorkbox.util.messagebus.subscription.SubscriptionManager; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; public final class SubscriptionUtils { @@ -36,13 +36,13 @@ class SubscriptionUtils { public - SubscriptionUtils(final ClassUtils superClass, final int numberOfThreads, final float loadFactor) { + SubscriptionUtils(final ClassUtils superClass, final float loadFactor, final int numberOfThreads) { this.superClass = superClass; // superClassSubscriptions keeps track of all subscriptions of super classes. SUB/UNSUB dumps it, so it is recreated dynamically. // it's a hit on SUB/UNSUB, but improves performance of handlers - this.superClassSubscriptions = JavaVersionAdapter.concurrentMap(8, loadFactor, numberOfThreads); + this.superClassSubscriptions = new ConcurrentHashMap, ArrayList>(8, loadFactor, numberOfThreads); this.superClassSubscriptionsMulti = new HashMapTree, ArrayList>(); } @@ -57,8 +57,6 @@ class SubscriptionUtils { * Returns an array COPY of the super subscriptions for the specified type. *

* This ALSO checks to see if the superClass accepts subtypes. - *

- * protected by read lock by caller * * @return CAN NOT RETURN NULL */ @@ -110,8 +108,6 @@ class SubscriptionUtils { * Returns an array COPY of the super subscriptions for the specified type. *

* This ALSO checks to see if the superClass accepts subtypes. - *

- * protected by read lock by caller * * @return CAN NOT RETURN NULL */ @@ -178,8 +174,6 @@ class SubscriptionUtils { * Returns an array COPY of the super subscriptions for the specified type. *

* This ALSO checks to see if the superClass accepts subtypes. - *

- * protected by read lock by caller * * @return CAN NOT RETURN NULL */ diff --git a/src/dorkbox/util/messagebus/utils/VarArgUtils.java b/src/dorkbox/util/messagebus/utils/VarArgUtils.java index e76f311..cebdf20 100644 --- a/src/dorkbox/util/messagebus/utils/VarArgUtils.java +++ b/src/dorkbox/util/messagebus/utils/VarArgUtils.java @@ -17,13 +17,13 @@ package dorkbox.util.messagebus.utils; import dorkbox.util.messagebus.common.HashMapTree; import dorkbox.util.messagebus.common.MessageHandler; -import dorkbox.util.messagebus.common.adapter.JavaVersionAdapter; import dorkbox.util.messagebus.subscription.Subscription; import dorkbox.util.messagebus.subscription.SubscriptionManager; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; public final class VarArgUtils { @@ -37,14 +37,14 @@ class VarArgUtils { public - VarArgUtils(final ClassUtils superClassUtils, final int numberOfThreads, final float loadFactor) { + VarArgUtils(final ClassUtils superClassUtils, final float loadFactor, final int numberOfThreads) { this.superClassUtils = superClassUtils; - this.varArgSubscriptionsSingle = JavaVersionAdapter.concurrentMap(16, loadFactor, numberOfThreads); + this.varArgSubscriptionsSingle = new ConcurrentHashMap, ArrayList>(16, loadFactor, numberOfThreads); this.varArgSubscriptionsMulti = new HashMapTree, ArrayList>(); - this.varArgSuperSubscriptionsSingle = JavaVersionAdapter.concurrentMap(16, loadFactor, numberOfThreads); + this.varArgSuperSubscriptionsSingle = new ConcurrentHashMap, ArrayList>(16, loadFactor, numberOfThreads); this.varArgSuperSubscriptionsMulti = new HashMapTree, ArrayList>(); }
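For context on the new WriterDistruptor file above: subscribe and unsubscribe calls are turned into small command events, published to a ring buffer, and applied by exactly one consumer thread, so the subscription data structures only ever see a single writer. The following is a minimal, self-contained sketch of that idea, assuming the Disruptor 3.3+ DSL rather than the hand-wired WorkProcessor used in the patch; every class and member name in it is illustrative only, not part of this change.

    import com.lmax.disruptor.EventFactory;
    import com.lmax.disruptor.EventHandler;
    import com.lmax.disruptor.dsl.Disruptor;

    import java.util.concurrent.Executors;

    public class SingleWriterSketch {
        // mutable command slot reused by the ring buffer, analogous to SubscriptionHolder
        static final class Command {
            boolean subscribe;
            Object listener;
        }

        public static void main(String[] args) {
            Disruptor<Command> disruptor = new Disruptor<Command>(
                    new EventFactory<Command>() {
                        @Override
                        public Command newInstance() {
                            return new Command();
                        }
                    },
                    1024,
                    Executors.defaultThreadFactory());

            // exactly one handler thread: the single writer for the subscription maps
            disruptor.handleEventsWith(new EventHandler<Command>() {
                @Override
                public void onEvent(final Command event, final long sequence, final boolean endOfBatch) {
                    if (event.subscribe) {
                        System.out.println("subscribe " + event.listener);
                    }
                    else {
                        System.out.println("unsubscribe " + event.listener);
                    }
                }
            });
            disruptor.start();

            // any caller thread may publish: claim a slot, fill it, then publish the sequence
            long seq = disruptor.getRingBuffer().next();
            try {
                Command cmd = disruptor.getRingBuffer().get(seq);
                cmd.subscribe = true;
                cmd.listener = "someListener";
            } finally {
                disruptor.getRingBuffer().publish(seq);
            }

            // shutdown() drains outstanding events before halting the consumer
            disruptor.shutdown();
        }
    }

The patch wires a WorkProcessor with an explicit work sequence instead of the DSL, but the claim/fill/publish flow on the producer side is the same.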
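The SubscriptionManager changes above swap the JavaVersionAdapter maps for plain java.util.concurrent.ConcurrentHashMap and drop the ThreadLocal list cache. Where a per-message-type subscription list still has to be created lazily by more than one thread, putIfAbsent gives the atomic create-if-missing shape; a short sketch with illustrative names:

    import java.util.List;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.CopyOnWriteArrayList;

    final class SubscriptionListSketch {
        private final ConcurrentMap<Class<?>, List<Object>> subsPerMessageType =
                new ConcurrentHashMap<Class<?>, List<Object>>(32, 0.8F, 1);

        // returns the single list shared by all callers for this message type,
        // creating it atomically the first time the type is seen
        List<Object> listFor(final Class<?> messageType) {
            List<Object> list = subsPerMessageType.get(messageType);
            if (list == null) {
                final List<Object> created = new CopyOnWriteArrayList<Object>();
                final List<Object> existing = subsPerMessageType.putIfAbsent(messageType, created);
                list = existing != null ? existing : created;
            }
            return list;
        }
    }

Every caller ends up sharing the same CopyOnWriteArrayList, so a lost race only costs one discarded list allocation.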
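The new ClassUtils javadoc states that race conditions may produce duplicate answers and that this is acceptable, since the class hierarchy never changes at runtime and every computation returns the same result. Below is a simplified sketch of that racy-but-idempotent memoization shape. It keeps a ConcurrentMap so it is safe on its own (the patch switches these caches to IdentityHashMap, presumably relying on the single-writer model), and it only walks superclasses and direct interfaces, whereas the real getSuperClasses also handles array types and the full interface hierarchy.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    final class SuperClassCacheSketch {
        private final ConcurrentMap<Class<?>, Class<?>[]> cache =
                new ConcurrentHashMap<Class<?>, Class<?>[]>();

        // racing threads may both compute the hierarchy, but the results are identical,
        // so the duplicate work is harmless and the last write simply wins
        Class<?>[] superClassesOf(final Class<?> clazz) {
            Class<?>[] cached = cache.get(clazz);
            if (cached != null) {
                return cached;
            }

            final List<Class<?>> collected = new ArrayList<Class<?>>();
            for (Class<?> c = clazz.getSuperclass(); c != null && c != Object.class; c = c.getSuperclass()) {
                collected.add(c);
            }
            for (Class<?> iface : clazz.getInterfaces()) {
                collected.add(iface);
            }

            final Class<?>[] result = collected.toArray(new Class<?>[collected.size()]);
            cache.put(clazz, result);
            return result;
        }
    }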