entrySet;
+
+
+ /* ---------------- Public operations -------------- */
+
+ /**
+ * Creates a new, empty map with the default initial table size (16).
+ */
+ public ConcurrentHashMapV8() {
+ }
+
+ /**
+ * Creates a new, empty map with an initial table size
+ * accommodating the specified number of elements without the need
+ * to dynamically resize.
+ *
+ * @param initialCapacity The implementation performs internal
+ * sizing to accommodate this many elements.
+ * @throws IllegalArgumentException if the initial capacity of
+ * elements is negative
+ */
+ public ConcurrentHashMapV8(int initialCapacity) {
+ if (initialCapacity < 0)
+ throw new IllegalArgumentException();
+ int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
+ MAXIMUM_CAPACITY :
+ tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
+ this.sizeCtl = cap;
+ }
+
+ /**
+ * Creates a new map with the same mappings as the given map.
+ *
+ * @param m the map
+ */
+ public ConcurrentHashMapV8(Map<? extends K, ? extends V> m) {
+ this.sizeCtl = DEFAULT_CAPACITY;
+ putAll(m);
+ }
+
+ /**
+ * Creates a new, empty map with an initial table size based on
+ * the given number of elements ({@code initialCapacity}) and
+ * initial table density ({@code loadFactor}).
+ *
+ * @param initialCapacity the initial capacity. The implementation
+ * performs internal sizing to accommodate this many elements,
+ * given the specified load factor.
+ * @param loadFactor the load factor (table density) for
+ * establishing the initial table size
+ * @throws IllegalArgumentException if the initial capacity of
+ * elements is negative or the load factor is nonpositive
+ *
+ * @since 1.6
+ */
+ public ConcurrentHashMapV8(int initialCapacity, float loadFactor) {
+ this(initialCapacity, loadFactor, 1);
+ }
+
+ /**
+ * Creates a new, empty map with an initial table size based on
+ * the given number of elements ({@code initialCapacity}), table
+ * density ({@code loadFactor}), and number of concurrently
+ * updating threads ({@code concurrencyLevel}).
+ *
+ * @param initialCapacity the initial capacity. The implementation
+ * performs internal sizing to accommodate this many elements,
+ * given the specified load factor.
+ * @param loadFactor the load factor (table density) for
+ * establishing the initial table size
+ * @param concurrencyLevel the estimated number of concurrently
+ * updating threads. The implementation may use this value as
+ * a sizing hint.
+ * @throws IllegalArgumentException if the initial capacity is
+ * negative or the load factor or concurrencyLevel are
+ * nonpositive
+ */
+ public ConcurrentHashMapV8(int initialCapacity,
+ float loadFactor, int concurrencyLevel) {
+ if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
+ throw new IllegalArgumentException();
+ if (initialCapacity < concurrencyLevel) // Use at least as many bins
+ initialCapacity = concurrencyLevel; // as estimated threads
+ long size = (long)(1.0 + initialCapacity / loadFactor);
+ int cap = (size >= MAXIMUM_CAPACITY) ?
+ MAXIMUM_CAPACITY : tableSizeFor((int)size);
+ this.sizeCtl = cap;
+ }
+
+ // Original (since JDK1.2) Map methods
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public int size() {
+ long n = sumCount();
+ return ((n < 0L) ? 0 :
+ (n > Integer.MAX_VALUE) ? Integer.MAX_VALUE :
+ (int)n);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public boolean isEmpty() {
+ return sumCount() <= 0L; // ignore transient negative values
+ }
+
+ /**
+ * Returns the value to which the specified key is mapped,
+ * or {@code null} if this map contains no mapping for the key.
+ *
+ * <p>More formally, if this map contains a mapping from a key
+ * {@code k} to a value {@code v} such that {@code key.equals(k)},
+ * then this method returns {@code v}; otherwise it returns
+ * {@code null}. (There can be at most one such mapping.)
+ *
+ * @throws NullPointerException if the specified key is null
+ */
+ @Override
+ public V get(Object key) {
+ Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
+ int h = spread(key.hashCode());
+ if ((tab = table) != null && (n = tab.length) > 0 &&
+ (e = tabAt(tab, (n - 1) & h)) != null) {
+ if ((eh = e.hash) == h) {
+ if ((ek = e.key) == key || (ek != null && key.equals(ek)))
+ return e.val;
+ }
+ else if (eh < 0)
+ return (p = e.find(h, key)) != null ? p.val : null;
+ while ((e = e.next) != null) {
+ if (e.hash == h &&
+ ((ek = e.key) == key || (ek != null && key.equals(ek))))
+ return e.val;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Tests if the specified object is a key in this table.
+ *
+ * @param key possible key
+ * @return {@code true} if and only if the specified object
+ * is a key in this table, as determined by the
+ * {@code equals} method; {@code false} otherwise
+ * @throws NullPointerException if the specified key is null
+ */
+ @Override
+ public boolean containsKey(Object key) {
+ return get(key) != null;
+ }
+
+ /**
+ * Returns {@code true} if this map maps one or more keys to the
+ * specified value. Note: This method may require a full traversal
+ * of the map, and is much slower than method {@code containsKey}.
+ *
+ * @param value value whose presence in this map is to be tested
+ * @return {@code true} if this map maps one or more keys to the
+ * specified value
+ * @throws NullPointerException if the specified value is null
+ */
+ @Override
+ public boolean containsValue(Object value) {
+ if (value == null)
+ throw new NullPointerException();
+ Node<K,V>[] t;
+ if ((t = table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ V v;
+ if ((v = p.val) == value || (v != null && value.equals(v)))
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Maps the specified key to the specified value in this table.
+ * Neither the key nor the value can be null.
+ *
+ * <p>The value can be retrieved by calling the {@code get} method
+ * with a key that is equal to the original key.
+ *
+ * @param key key with which the specified value is to be associated
+ * @param value value to be associated with the specified key
+ * @return the previous value associated with {@code key}, or
+ * {@code null} if there was no mapping for {@code key}
+ * @throws NullPointerException if the specified key or value is null
+ */
+ @Override
+ public V put(K key, V value) {
+ return putVal(key, value, false);
+ }
+
+ /** Implementation for put and putIfAbsent */
+ final V putVal(K key, V value, boolean onlyIfAbsent) {
+ if (key == null || value == null) throw new NullPointerException();
+ int hash = spread(key.hashCode());
+ int binCount = 0;
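+ // Insertion loop: lazily initialize the table, CAS a new node into an
+ // empty bin, help a resize in progress when a forwarding (MOVED) node
+ // is seen, or else lock the bin head and update the chain or tree.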
+ for (Node<K,V>[] tab = table;;) {
+ Node<K,V> f; int n, i, fh;
+ if (tab == null || (n = tab.length) == 0)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
+ if (casTabAt(tab, i, null,
+ new Node<K,V>(hash, key, value, null)))
+ break; // no lock when adding to empty bin
+ }
+ else if ((fh = f.hash) == MOVED)
+ tab = helpTransfer(tab, f);
+ else {
+ V oldVal = null;
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ if (fh >= 0) {
+ binCount = 1;
+ for (Node<K,V> e = f;; ++binCount) {
+ K ek;
+ if (e.hash == hash &&
+ ((ek = e.key) == key ||
+ (ek != null && key.equals(ek)))) {
+ oldVal = e.val;
+ if (!onlyIfAbsent)
+ e.val = value;
+ break;
+ }
+ Node<K,V> pred = e;
+ if ((e = e.next) == null) {
+ pred.next = new Node<K,V>(hash, key,
+ value, null);
+ break;
+ }
+ }
+ }
+ else if (f instanceof TreeBin) {
+ Node<K,V> p;
+ binCount = 2;
+ if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
+ value)) != null) {
+ oldVal = p.val;
+ if (!onlyIfAbsent)
+ p.val = value;
+ }
+ }
+ }
+ }
+ if (binCount != 0) {
+ if (binCount >= TREEIFY_THRESHOLD)
+ treeifyBin(tab, i);
+ if (oldVal != null)
+ return oldVal;
+ break;
+ }
+ }
+ }
+ addCount(1L, binCount);
+ return null;
+ }
+
+ /**
+ * Copies all of the mappings from the specified map to this one.
+ * These mappings replace any mappings that this map had for any of the
+ * keys currently in the specified map.
+ *
+ * @param m mappings to be stored in this map
+ */
+ @Override
+ public void putAll(Map<? extends K, ? extends V> m) {
+ tryPresize(m.size());
+ for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
+ putVal(e.getKey(), e.getValue(), false);
+ }
+
+ /**
+ * Removes the key (and its corresponding value) from this map.
+ * This method does nothing if the key is not in the map.
+ *
+ * @param key the key that needs to be removed
+ * @return the previous value associated with {@code key}, or
+ * {@code null} if there was no mapping for {@code key}
+ * @throws NullPointerException if the specified key is null
+ */
+ @Override
+ public V remove(Object key) {
+ return replaceNode(key, null, null);
+ }
+
+ /**
+ * Implementation for the four public remove/replace methods:
+ * Replaces node value with v, conditional upon match of cv if
+ * non-null. If resulting value is null, delete.
+ */
+ final V replaceNode(Object key, V value, Object cv) {
+ int hash = spread(key.hashCode());
+ for (Node<K,V>[] tab = table;;) {
+ Node<K,V> f; int n, i, fh;
+ if (tab == null || (n = tab.length) == 0 ||
+ (f = tabAt(tab, i = (n - 1) & hash)) == null)
+ break;
+ else if ((fh = f.hash) == MOVED)
+ tab = helpTransfer(tab, f);
+ else {
+ V oldVal = null;
+ boolean validated = false;
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ if (fh >= 0) {
+ validated = true;
+ for (Node<K,V> e = f, pred = null;;) {
+ K ek;
+ if (e.hash == hash &&
+ ((ek = e.key) == key ||
+ (ek != null && key.equals(ek)))) {
+ V ev = e.val;
+ if (cv == null || cv == ev ||
+ (ev != null && cv.equals(ev))) {
+ oldVal = ev;
+ if (value != null)
+ e.val = value;
+ else if (pred != null)
+ pred.next = e.next;
+ else
+ setTabAt(tab, i, e.next);
+ }
+ break;
+ }
+ pred = e;
+ if ((e = e.next) == null)
+ break;
+ }
+ }
+ else if (f instanceof TreeBin) {
+ validated = true;
+ TreeBin<K,V> t = (TreeBin<K,V>)f;
+ TreeNode<K,V> r, p;
+ if ((r = t.root) != null &&
+ (p = r.findTreeNode(hash, key, null)) != null) {
+ V pv = p.val;
+ if (cv == null || cv == pv ||
+ (pv != null && cv.equals(pv))) {
+ oldVal = pv;
+ if (value != null)
+ p.val = value;
+ else if (t.removeTreeNode(p))
+ setTabAt(tab, i, untreeify(t.first));
+ }
+ }
+ }
+ }
+ }
+ if (validated) {
+ if (oldVal != null) {
+ if (value == null)
+ addCount(-1L, -1);
+ return oldVal;
+ }
+ break;
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Removes all of the mappings from this map.
+ */
+ @Override
+ public void clear() {
+ long delta = 0L; // negative number of deletions
+ int i = 0;
+ Node<K,V>[] tab = table;
+ while (tab != null && i < tab.length) {
+ int fh;
+ Node<K,V> f = tabAt(tab, i);
+ if (f == null)
+ ++i;
+ else if ((fh = f.hash) == MOVED) {
+ tab = helpTransfer(tab, f);
+ i = 0; // restart
+ }
+ else {
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ Node<K,V> p = (fh >= 0 ? f :
+ (f instanceof TreeBin) ?
+ ((TreeBin<K,V>)f).first : null);
+ while (p != null) {
+ --delta;
+ p = p.next;
+ }
+ setTabAt(tab, i++, null);
+ }
+ }
+ }
+ }
+ if (delta != 0L)
+ addCount(delta, -1);
+ }
+
+ /**
+ * Returns a {@link Set} view of the keys contained in this map.
+ * The set is backed by the map, so changes to the map are
+ * reflected in the set, and vice-versa. The set supports element
+ * removal, which removes the corresponding mapping from this map,
+ * via the {@code Iterator.remove}, {@code Set.remove},
+ * {@code removeAll}, {@code retainAll}, and {@code clear}
+ * operations. It does not support the {@code add} or
+ * {@code addAll} operations.
+ *
+ * <p>The view's {@code iterator} is a "weakly consistent" iterator
+ * that will never throw {@link ConcurrentModificationException},
+ * and guarantees to traverse elements as they existed upon
+ * construction of the iterator, and may (but is not guaranteed to)
+ * reflect any modifications subsequent to construction.
+ *
+ * @return the set view
+ */
+ @Override
+ public KeySetView<K,V> keySet() {
+ KeySetView<K,V> ks;
+ return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
+ }
+
+ /**
+ * Returns a {@link Collection} view of the values contained in this map.
+ * The collection is backed by the map, so changes to the map are
+ * reflected in the collection, and vice-versa. The collection
+ * supports element removal, which removes the corresponding
+ * mapping from this map, via the {@code Iterator.remove},
+ * {@code Collection.remove}, {@code removeAll},
+ * {@code retainAll}, and {@code clear} operations. It does not
+ * support the {@code add} or {@code addAll} operations.
+ *
+ * <p>The view's {@code iterator} is a "weakly consistent" iterator
+ * that will never throw {@link ConcurrentModificationException},
+ * and guarantees to traverse elements as they existed upon
+ * construction of the iterator, and may (but is not guaranteed to)
+ * reflect any modifications subsequent to construction.
+ *
+ * @return the collection view
+ */
+ @Override
+ public Collection<V> values() {
+ ValuesView<K,V> vs;
+ return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
+ }
+
+ /**
+ * Returns a {@link Set} view of the mappings contained in this map.
+ * The set is backed by the map, so changes to the map are
+ * reflected in the set, and vice-versa. The set supports element
+ * removal, which removes the corresponding mapping from the map,
+ * via the {@code Iterator.remove}, {@code Set.remove},
+ * {@code removeAll}, {@code retainAll}, and {@code clear}
+ * operations.
+ *
+ * <p>The view's {@code iterator} is a "weakly consistent" iterator
+ * that will never throw {@link ConcurrentModificationException},
+ * and guarantees to traverse elements as they existed upon
+ * construction of the iterator, and may (but is not guaranteed to)
+ * reflect any modifications subsequent to construction.
+ *
+ * @return the set view
+ */
+ @Override
+ public Set<Map.Entry<K,V>> entrySet() {
+ EntrySetView<K,V> es;
+ return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
+ }
+
+ /**
+ * Returns the hash code value for this {@link Map}, i.e.,
+ * the sum of, for each key-value pair in the map,
+ * {@code key.hashCode() ^ value.hashCode()}.
+ *
+ * @return the hash code value for this map
+ */
+ @Override
+ public int hashCode() {
+ int h = 0;
+ Node<K,V>[] t;
+ if ((t = table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; )
+ h += p.key.hashCode() ^ p.val.hashCode();
+ }
+ return h;
+ }
+
+ /**
+ * Returns a string representation of this map. The string
+ * representation consists of a list of key-value mappings (in no
+ * particular order) enclosed in braces ("{@code {}}"). Adjacent
+ * mappings are separated by the characters {@code ", "} (comma
+ * and space). Each key-value mapping is rendered as the key
+ * followed by an equals sign ("{@code =}") followed by the
+ * associated value.
+ *
+ * @return a string representation of this map
+ */
+ @Override
+ public String toString() {
+ Node<K,V>[] t;
+ int f = (t = table) == null ? 0 : t.length;
+ Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
+ StringBuilder sb = new StringBuilder();
+ sb.append('{');
+ Node<K,V> p;
+ if ((p = it.advance()) != null) {
+ for (;;) {
+ K k = p.key;
+ V v = p.val;
+ sb.append(k == this ? "(this Map)" : k);
+ sb.append('=');
+ sb.append(v == this ? "(this Map)" : v);
+ if ((p = it.advance()) == null)
+ break;
+ sb.append(',').append(' ');
+ }
+ }
+ return sb.append('}').toString();
+ }
+
+ /**
+ * Compares the specified object with this map for equality.
+ * Returns {@code true} if the given object is a map with the same
+ * mappings as this map. This operation may return misleading
+ * results if either map is concurrently modified during execution
+ * of this method.
+ *
+ * @param o object to be compared for equality with this map
+ * @return {@code true} if the specified object is equal to this map
+ */
+ @Override
+ public boolean equals(Object o) {
+ if (o != this) {
+ if (!(o instanceof Map))
+ return false;
+ Map<?,?> m = (Map<?,?>) o;
+ Node<K,V>[] t;
+ int f = (t = table) == null ? 0 : t.length;
+ Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ V val = p.val;
+ Object v = m.get(p.key);
+ if (v == null || (v != val && !v.equals(val)))
+ return false;
+ }
+ for (Map.Entry<?,?> e : m.entrySet()) {
+ Object mk, mv, v;
+ if ((mk = e.getKey()) == null ||
+ (mv = e.getValue()) == null ||
+ (v = get(mk)) == null ||
+ (mv != v && !mv.equals(v)))
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Stripped-down version of helper class used in previous version,
+ * declared for the sake of serialization compatibility
+ */
+ static class Segment<K,V> extends ReentrantLock implements Serializable {
+ private static final long serialVersionUID = 2249069246763182397L;
+ final float loadFactor;
+ Segment(float lf) { this.loadFactor = lf; }
+ }
+
+ /**
+ * Saves the state of the {@code ConcurrentHashMapV8} instance to a
+ * stream (i.e., serializes it).
+ * @param s the stream
+ * @throws java.io.IOException if an I/O error occurs
+ * @serialData
+ * the key (Object) and value (Object)
+ * for each key-value mapping, followed by a null pair.
+ * The key-value mappings are emitted in no particular order.
+ */
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws java.io.IOException {
+ // For serialization compatibility
+ // Emulate segment calculation from previous version of this class
+ int sshift = 0;
+ int ssize = 1;
+ while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
+ ++sshift;
+ ssize <<= 1;
+ }
+ int segmentShift = 32 - sshift;
+ int segmentMask = ssize - 1;
+ @SuppressWarnings("unchecked") Segment<K,V>[] segments = (Segment<K,V>[])
+ new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
+ for (int i = 0; i < segments.length; ++i)
+ segments[i] = new Segment<K,V>(LOAD_FACTOR);
+ s.putFields().put("segments", segments);
+ s.putFields().put("segmentShift", segmentShift);
+ s.putFields().put("segmentMask", segmentMask);
+ s.writeFields();
+
+ Node<K,V>[] t;
+ if ((t = table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ s.writeObject(p.key);
+ s.writeObject(p.val);
+ }
+ }
+ s.writeObject(null);
+ s.writeObject(null);
+ segments = null; // throw away
+ }
+
+ /**
+ * Reconstitutes the instance from a stream (that is, deserializes it).
+ * @param s the stream
+ * @throws ClassNotFoundException if the class of a serialized object
+ * could not be found
+ * @throws java.io.IOException if an I/O error occurs
+ */
+ private void readObject(java.io.ObjectInputStream s)
+ throws java.io.IOException, ClassNotFoundException {
+ /*
+ * To improve performance in typical cases, we create nodes
+ * while reading, then place in table once size is known.
+ * However, we must also validate uniqueness and deal with
+ * overpopulated bins while doing so, which requires
+ * specialized versions of putVal mechanics.
+ */
+ sizeCtl = -1; // force exclusion for table construction
+ s.defaultReadObject();
+ long size = 0L;
+ Node<K,V> p = null;
+ for (;;) {
+ @SuppressWarnings("unchecked") K k = (K) s.readObject();
+ @SuppressWarnings("unchecked") V v = (V) s.readObject();
+ if (k != null && v != null) {
+ p = new Node<K,V>(spread(k.hashCode()), k, v, p);
+ ++size;
+ }
+ else
+ break;
+ }
+ if (size == 0L)
+ sizeCtl = 0;
+ else {
+ int n;
+ if (size >= MAXIMUM_CAPACITY >>> 1)
+ n = MAXIMUM_CAPACITY;
+ else {
+ int sz = (int)size;
+ n = tableSizeFor(sz + (sz >>> 1) + 1);
+ }
+ @SuppressWarnings({"rawtypes","unchecked"})
+ Node<K,V>[] tab = (Node<K,V>[])new Node[n];
+ int mask = n - 1;
+ long added = 0L;
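+ // Rebuild the table from the chain of deserialized nodes, skipping
+ // duplicate keys and treeifying bins that grow past TREEIFY_THRESHOLD.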
+ while (p != null) {
+ boolean insertAtFront;
+ Node<K,V> next = p.next, first;
+ int h = p.hash, j = h & mask;
+ if ((first = tabAt(tab, j)) == null)
+ insertAtFront = true;
+ else {
+ K k = p.key;
+ if (first.hash < 0) {
+ TreeBin<K,V> t = (TreeBin<K,V>)first;
+ if (t.putTreeVal(h, k, p.val) == null)
+ ++added;
+ insertAtFront = false;
+ }
+ else {
+ int binCount = 0;
+ insertAtFront = true;
+ Node<K,V> q; K qk;
+ for (q = first; q != null; q = q.next) {
+ if (q.hash == h &&
+ ((qk = q.key) == k ||
+ (qk != null && k.equals(qk)))) {
+ insertAtFront = false;
+ break;
+ }
+ ++binCount;
+ }
+ if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
+ insertAtFront = false;
+ ++added;
+ p.next = first;
+ TreeNode<K,V> hd = null, tl = null;
+ for (q = p; q != null; q = q.next) {
+ TreeNode<K,V> t = new TreeNode<K,V>
+ (q.hash, q.key, q.val, null, null);
+ if ((t.prev = tl) == null)
+ hd = t;
+ else
+ tl.next = t;
+ tl = t;
+ }
+ setTabAt(tab, j, new TreeBin<K,V>(hd));
+ }
+ }
+ }
+ if (insertAtFront) {
+ ++added;
+ p.next = first;
+ setTabAt(tab, j, p);
+ }
+ p = next;
+ }
+ table = tab;
+ sizeCtl = n - (n >>> 2);
+ baseCount = added;
+ }
+ }
+
+ // ConcurrentMap methods
+
+ /**
+ * {@inheritDoc}
+ *
+ * @return the previous value associated with the specified key,
+ * or {@code null} if there was no mapping for the key
+ * @throws NullPointerException if the specified key or value is null
+ */
+ @Override
+ public V putIfAbsent(K key, V value) {
+ return putVal(key, value, true);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @throws NullPointerException if the specified key is null
+ */
+ @Override
+ public boolean remove(Object key, Object value) {
+ if (key == null)
+ throw new NullPointerException();
+ return value != null && replaceNode(key, null, value) != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @throws NullPointerException if any of the arguments are null
+ */
+ @Override
+ public boolean replace(K key, V oldValue, V newValue) {
+ if (key == null || oldValue == null || newValue == null)
+ throw new NullPointerException();
+ return replaceNode(key, newValue, oldValue) != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @return the previous value associated with the specified key,
+ * or {@code null} if there was no mapping for the key
+ * @throws NullPointerException if the specified key or value is null
+ */
+ @Override
+ public V replace(K key, V value) {
+ if (key == null || value == null)
+ throw new NullPointerException();
+ return replaceNode(key, value, null);
+ }
+
+ // Overrides of JDK8+ Map extension method defaults
+
+ /**
+ * Returns the value to which the specified key is mapped, or the
+ * given default value if this map contains no mapping for the
+ * key.
+ *
+ * @param key the key whose associated value is to be returned
+ * @param defaultValue the value to return if this map contains
+ * no mapping for the given key
+ * @return the mapping for the key, if present; else the default value
+ * @throws NullPointerException if the specified key is null
+ */
+ public V getOrDefault(Object key, V defaultValue) {
+ V v;
+ return (v = get(key)) == null ? defaultValue : v;
+ }
+
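+ /**
+ * Performs the given action for each (key, value) pair, using the same
+ * weakly consistent traversal as this map's views.
+ *
+ * @param action the action to perform on each entry
+ * @throws NullPointerException if the action is null
+ */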
+ public void forEach(BiAction<? super K, ? super V> action) {
+ if (action == null) throw new NullPointerException();
+ Node<K,V>[] t;
+ if ((t = table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ action.apply(p.key, p.val);
+ }
+ }
+ }
+
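+ /**
+ * Replaces each entry's value with the result of invoking the given
+ * function on that entry, retrying if the value changes concurrently.
+ *
+ * @param function the function to compute a replacement value
+ * @throws NullPointerException if the function or any replacement
+ * value is null
+ */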
+ public void replaceAll(BiFun<? super K, ? super V, ? extends V> function) {
+ if (function == null) throw new NullPointerException();
+ Node<K,V>[] t;
+ if ((t = table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ V oldValue = p.val;
+ for (K key = p.key;;) {
+ V newValue = function.apply(key, oldValue);
+ if (newValue == null)
+ throw new NullPointerException();
+ if (replaceNode(key, newValue, oldValue) != null ||
+ (oldValue = get(key)) == null)
+ break;
+ }
+ }
+ }
+ }
+
+ /**
+ * If the specified key is not already associated with a value,
+ * attempts to compute its value using the given mapping function
+ * and enters it into this map unless {@code null}. The entire
+ * method invocation is performed atomically, so the function is
+ * applied at most once per key. Some attempted update operations
+ * on this map by other threads may be blocked while computation
+ * is in progress, so the computation should be short and simple,
+ * and must not attempt to update any other mappings of this map.
+ *
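+ * <p>A typical use, sketched with this class's {@code Fun} interface
+ * and a hypothetical {@code createValue} factory:
+ * <pre> {@code
+ * V v = map.computeIfAbsent(key, new Fun<K,V>() {
+ *   public V apply(K k) { return createValue(k); }
+ * });}</pre>
+ *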
+ * @param key key with which the specified value is to be associated
+ * @param mappingFunction the function to compute a value
+ * @return the current (existing or computed) value associated with
+ * the specified key, or null if the computed value is null
+ * @throws NullPointerException if the specified key or mappingFunction
+ * is null
+ * @throws IllegalStateException if the computation detectably
+ * attempts a recursive update to this map that would
+ * otherwise never complete
+ * @throws RuntimeException or Error if the mappingFunction does so,
+ * in which case the mapping is left unestablished
+ */
+ public V computeIfAbsent(K key, Fun<? super K, ? extends V> mappingFunction) {
+ if (key == null || mappingFunction == null)
+ throw new NullPointerException();
+ int h = spread(key.hashCode());
+ V val = null;
+ int binCount = 0;
+ for (Node<K,V>[] tab = table;;) {
+ Node<K,V> f; int n, i, fh;
+ if (tab == null || (n = tab.length) == 0)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
+ Node<K,V> r = new ReservationNode<K,V>();
+ synchronized (r) {
+ if (casTabAt(tab, i, null, r)) {
+ binCount = 1;
+ Node<K,V> node = null;
+ try {
+ if ((val = mappingFunction.apply(key)) != null)
+ node = new Node<K,V>(h, key, val, null);
+ } finally {
+ setTabAt(tab, i, node);
+ }
+ }
+ }
+ if (binCount != 0)
+ break;
+ }
+ else if ((fh = f.hash) == MOVED)
+ tab = helpTransfer(tab, f);
+ else {
+ boolean added = false;
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ if (fh >= 0) {
+ binCount = 1;
+ for (Node<K,V> e = f;; ++binCount) {
+ K ek; V ev;
+ if (e.hash == h &&
+ ((ek = e.key) == key ||
+ (ek != null && key.equals(ek)))) {
+ val = e.val;
+ break;
+ }
+ Node<K,V> pred = e;
+ if ((e = e.next) == null) {
+ if ((val = mappingFunction.apply(key)) != null) {
+ added = true;
+ pred.next = new Node<K,V>(h, key, val, null);
+ }
+ break;
+ }
+ }
+ }
+ else if (f instanceof TreeBin) {
+ binCount = 2;
+ TreeBin<K,V> t = (TreeBin<K,V>)f;
+ TreeNode<K,V> r, p;
+ if ((r = t.root) != null &&
+ (p = r.findTreeNode(h, key, null)) != null)
+ val = p.val;
+ else if ((val = mappingFunction.apply(key)) != null) {
+ added = true;
+ t.putTreeVal(h, key, val);
+ }
+ }
+ }
+ }
+ if (binCount != 0) {
+ if (binCount >= TREEIFY_THRESHOLD)
+ treeifyBin(tab, i);
+ if (!added)
+ return val;
+ break;
+ }
+ }
+ }
+ if (val != null)
+ addCount(1L, binCount);
+ return val;
+ }
+
+ /**
+ * If the value for the specified key is present, attempts to
+ * compute a new mapping given the key and its current mapped
+ * value. The entire method invocation is performed atomically.
+ * Some attempted update operations on this map by other threads
+ * may be blocked while computation is in progress, so the
+ * computation should be short and simple, and must not attempt to
+ * update any other mappings of this map.
+ *
+ * @param key key with which a value may be associated
+ * @param remappingFunction the function to compute a value
+ * @return the new value associated with the specified key, or null if none
+ * @throws NullPointerException if the specified key or remappingFunction
+ * is null
+ * @throws IllegalStateException if the computation detectably
+ * attempts a recursive update to this map that would
+ * otherwise never complete
+ * @throws RuntimeException or Error if the remappingFunction does so,
+ * in which case the mapping is unchanged
+ */
+ public V computeIfPresent(K key, BiFun<? super K, ? super V, ? extends V> remappingFunction) {
+ if (key == null || remappingFunction == null)
+ throw new NullPointerException();
+ int h = spread(key.hashCode());
+ V val = null;
+ int delta = 0;
+ int binCount = 0;
+ for (Node<K,V>[] tab = table;;) {
+ Node<K,V> f; int n, i, fh;
+ if (tab == null || (n = tab.length) == 0)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
+ break;
+ else if ((fh = f.hash) == MOVED)
+ tab = helpTransfer(tab, f);
+ else {
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ if (fh >= 0) {
+ binCount = 1;
+ for (Node<K,V> e = f, pred = null;; ++binCount) {
+ K ek;
+ if (e.hash == h &&
+ ((ek = e.key) == key ||
+ (ek != null && key.equals(ek)))) {
+ val = remappingFunction.apply(key, e.val);
+ if (val != null)
+ e.val = val;
+ else {
+ delta = -1;
+ Node<K,V> en = e.next;
+ if (pred != null)
+ pred.next = en;
+ else
+ setTabAt(tab, i, en);
+ }
+ break;
+ }
+ pred = e;
+ if ((e = e.next) == null)
+ break;
+ }
+ }
+ else if (f instanceof TreeBin) {
+ binCount = 2;
+ TreeBin<K,V> t = (TreeBin<K,V>)f;
+ TreeNode<K,V> r, p;
+ if ((r = t.root) != null &&
+ (p = r.findTreeNode(h, key, null)) != null) {
+ val = remappingFunction.apply(key, p.val);
+ if (val != null)
+ p.val = val;
+ else {
+ delta = -1;
+ if (t.removeTreeNode(p))
+ setTabAt(tab, i, untreeify(t.first));
+ }
+ }
+ }
+ }
+ }
+ if (binCount != 0)
+ break;
+ }
+ }
+ if (delta != 0)
+ addCount(delta, binCount);
+ return val;
+ }
+
+ /**
+ * Attempts to compute a mapping for the specified key and its
+ * current mapped value (or {@code null} if there is no current
+ * mapping). The entire method invocation is performed atomically.
+ * Some attempted update operations on this map by other threads
+ * may be blocked while computation is in progress, so the
+ * computation should be short and simple, and must not attempt to
+ * update any other mappings of this Map.
+ *
+ * @param key key with which the specified value is to be associated
+ * @param remappingFunction the function to compute a value
+ * @return the new value associated with the specified key, or null if none
+ * @throws NullPointerException if the specified key or remappingFunction
+ * is null
+ * @throws IllegalStateException if the computation detectably
+ * attempts a recursive update to this map that would
+ * otherwise never complete
+ * @throws RuntimeException or Error if the remappingFunction does so,
+ * in which case the mapping is unchanged
+ */
+ public V compute(K key,
+ BiFun<? super K, ? super V, ? extends V> remappingFunction) {
+ if (key == null || remappingFunction == null)
+ throw new NullPointerException();
+ int h = spread(key.hashCode());
+ V val = null;
+ int delta = 0;
+ int binCount = 0;
+ for (Node<K,V>[] tab = table;;) {
+ Node<K,V> f; int n, i, fh;
+ if (tab == null || (n = tab.length) == 0)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
+ Node<K,V> r = new ReservationNode<K,V>();
+ synchronized (r) {
+ if (casTabAt(tab, i, null, r)) {
+ binCount = 1;
+ Node<K,V> node = null;
+ try {
+ if ((val = remappingFunction.apply(key, null)) != null) {
+ delta = 1;
+ node = new Node<K,V>(h, key, val, null);
+ }
+ } finally {
+ setTabAt(tab, i, node);
+ }
+ }
+ }
+ if (binCount != 0)
+ break;
+ }
+ else if ((fh = f.hash) == MOVED)
+ tab = helpTransfer(tab, f);
+ else {
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ if (fh >= 0) {
+ binCount = 1;
+ for (Node<K,V> e = f, pred = null;; ++binCount) {
+ K ek;
+ if (e.hash == h &&
+ ((ek = e.key) == key ||
+ (ek != null && key.equals(ek)))) {
+ val = remappingFunction.apply(key, e.val);
+ if (val != null)
+ e.val = val;
+ else {
+ delta = -1;
+ Node<K,V> en = e.next;
+ if (pred != null)
+ pred.next = en;
+ else
+ setTabAt(tab, i, en);
+ }
+ break;
+ }
+ pred = e;
+ if ((e = e.next) == null) {
+ val = remappingFunction.apply(key, null);
+ if (val != null) {
+ delta = 1;
+ pred.next =
+ new Node<K,V>(h, key, val, null);
+ }
+ break;
+ }
+ }
+ }
+ else if (f instanceof TreeBin) {
+ binCount = 1;
+ TreeBin<K,V> t = (TreeBin<K,V>)f;
+ TreeNode<K,V> r, p;
+ if ((r = t.root) != null)
+ p = r.findTreeNode(h, key, null);
+ else
+ p = null;
+ V pv = (p == null) ? null : p.val;
+ val = remappingFunction.apply(key, pv);
+ if (val != null) {
+ if (p != null)
+ p.val = val;
+ else {
+ delta = 1;
+ t.putTreeVal(h, key, val);
+ }
+ }
+ else if (p != null) {
+ delta = -1;
+ if (t.removeTreeNode(p))
+ setTabAt(tab, i, untreeify(t.first));
+ }
+ }
+ }
+ }
+ if (binCount != 0) {
+ if (binCount >= TREEIFY_THRESHOLD)
+ treeifyBin(tab, i);
+ break;
+ }
+ }
+ }
+ if (delta != 0)
+ addCount(delta, binCount);
+ return val;
+ }
+
+ /**
+ * If the specified key is not already associated with a
+ * (non-null) value, associates it with the given value.
+ * Otherwise, replaces the value with the results of the given
+ * remapping function, or removes if {@code null}. The entire
+ * method invocation is performed atomically. Some attempted
+ * update operations on this map by other threads may be blocked
+ * while computation is in progress, so the computation should be
+ * short and simple, and must not attempt to update any other
+ * mappings of this Map.
+ *
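+ * <p>For example, to keep a running count per key in a map with
+ * {@code Integer} values (a sketch using this class's {@code BiFun}
+ * interface):
+ * <pre> {@code
+ * counts.merge(word, 1, new BiFun<Integer,Integer,Integer>() {
+ *   public Integer apply(Integer a, Integer b) { return a + b; }
+ * });}</pre>
+ *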
+ * @param key key with which the specified value is to be associated
+ * @param value the value to use if absent
+ * @param remappingFunction the function to recompute a value if present
+ * @return the new value associated with the specified key, or null if none
+ * @throws NullPointerException if the specified key or the
+ * remappingFunction is null
+ * @throws RuntimeException or Error if the remappingFunction does so,
+ * in which case the mapping is unchanged
+ */
+ public V merge(K key, V value, BiFun<? super V, ? super V, ? extends V> remappingFunction) {
+ if (key == null || value == null || remappingFunction == null)
+ throw new NullPointerException();
+ int h = spread(key.hashCode());
+ V val = null;
+ int delta = 0;
+ int binCount = 0;
+ for (Node<K,V>[] tab = table;;) {
+ Node<K,V> f; int n, i, fh;
+ if (tab == null || (n = tab.length) == 0)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
+ if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) {
+ delta = 1;
+ val = value;
+ break;
+ }
+ }
+ else if ((fh = f.hash) == MOVED)
+ tab = helpTransfer(tab, f);
+ else {
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ if (fh >= 0) {
+ binCount = 1;
+ for (Node<K,V> e = f, pred = null;; ++binCount) {
+ K ek;
+ if (e.hash == h &&
+ ((ek = e.key) == key ||
+ (ek != null && key.equals(ek)))) {
+ val = remappingFunction.apply(e.val, value);
+ if (val != null)
+ e.val = val;
+ else {
+ delta = -1;
+ Node<K,V> en = e.next;
+ if (pred != null)
+ pred.next = en;
+ else
+ setTabAt(tab, i, en);
+ }
+ break;
+ }
+ pred = e;
+ if ((e = e.next) == null) {
+ delta = 1;
+ val = value;
+ pred.next =
+ new Node<K,V>(h, key, val, null);
+ break;
+ }
+ }
+ }
+ else if (f instanceof TreeBin) {
+ binCount = 2;
+ TreeBin<K,V> t = (TreeBin<K,V>)f;
+ TreeNode<K,V> r = t.root;
+ TreeNode<K,V> p = (r == null) ? null :
+ r.findTreeNode(h, key, null);
+ val = (p == null) ? value :
+ remappingFunction.apply(p.val, value);
+ if (val != null) {
+ if (p != null)
+ p.val = val;
+ else {
+ delta = 1;
+ t.putTreeVal(h, key, val);
+ }
+ }
+ else if (p != null) {
+ delta = -1;
+ if (t.removeTreeNode(p))
+ setTabAt(tab, i, untreeify(t.first));
+ }
+ }
+ }
+ }
+ if (binCount != 0) {
+ if (binCount >= TREEIFY_THRESHOLD)
+ treeifyBin(tab, i);
+ break;
+ }
+ }
+ }
+ if (delta != 0)
+ addCount(delta, binCount);
+ return val;
+ }
+
+ // Hashtable legacy methods
+
+ /**
+ * Legacy method testing if some key maps into the specified value
+ * in this table. This method is identical in functionality to
+ * {@link #containsValue(Object)}, and exists solely to ensure
+ * full compatibility with class {@link Hashtable},
+ * which supported this method prior to introduction of the
+ * Java Collections framework.
+ *
+ * @param value a value to search for
+ * @return {@code true} if and only if some key maps to the
+ * {@code value} argument in this table as
+ * determined by the {@code equals} method;
+ * {@code false} otherwise
+ * @throws NullPointerException if the specified value is null
+ */
+ @Deprecated public boolean contains(Object value) {
+ return containsValue(value);
+ }
+
+ /**
+ * Returns an enumeration of the keys in this table.
+ *
+ * @return an enumeration of the keys in this table
+ * @see #keySet()
+ */
+ public Enumeration<K> keys() {
+ Node<K,V>[] t;
+ int f = (t = table) == null ? 0 : t.length;
+ return new KeyIterator<K,V>(t, f, 0, f, this);
+ }
+
+ /**
+ * Returns an enumeration of the values in this table.
+ *
+ * @return an enumeration of the values in this table
+ * @see #values()
+ */
+ public Enumeration<V> elements() {
+ Node<K,V>[] t;
+ int f = (t = table) == null ? 0 : t.length;
+ return new ValueIterator<K,V>(t, f, 0, f, this);
+ }
+
+ // ConcurrentHashMapV8-only methods
+
+ /**
+ * Returns the number of mappings. This method should be used
+ * instead of {@link #size} because a ConcurrentHashMapV8 may
+ * contain more mappings than can be represented as an int. The
+ * value returned is an estimate; the actual count may differ if
+ * there are concurrent insertions or removals.
+ *
+ * @return the number of mappings
+ * @since 1.8
+ */
+ public long mappingCount() {
+ long n = sumCount();
+ return (n < 0L) ? 0L : n; // ignore transient negative values
+ }
+
+ /**
+ * Creates a new {@link Set} backed by a ConcurrentHashMapV8
+ * from the given type to {@code Boolean.TRUE}.
+ *
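+ * <p>For example:
+ * <pre> {@code
+ * KeySetView<String,Boolean> set = ConcurrentHashMapV8.newKeySet();}</pre>
+ *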
+ * @return the new set
+ * @since 1.8
+ */
+ public static <K> KeySetView<K,Boolean> newKeySet() {
+ return new KeySetView<K,Boolean>
+ (new ConcurrentHashMapV8<K,Boolean>(), Boolean.TRUE);
+ }
+
+ /**
+ * Creates a new {@link Set} backed by a ConcurrentHashMapV8
+ * from the given type to {@code Boolean.TRUE}.
+ *
+ * @param initialCapacity The implementation performs internal
+ * sizing to accommodate this many elements.
+ * @return the new set
+ * @throws IllegalArgumentException if the initial capacity of
+ * elements is negative
+ * @since 1.8
+ */
+ public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
+ return new KeySetView<K,Boolean>
+ (new ConcurrentHashMapV8<K,Boolean>(initialCapacity), Boolean.TRUE);
+ }
+
+ /**
+ * Returns a {@link Set} view of the keys in this map, using the
+ * given common mapped value for any additions (i.e., {@link
+ * Collection#add} and {@link Collection#addAll(Collection)}).
+ * This is of course only appropriate if it is acceptable to use
+ * the same value for all additions from this view.
+ *
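+ * <p>For example, in a map with {@code Boolean} values,
+ * {@code map.keySet(Boolean.TRUE)} returns a set view whose
+ * {@code add(k)} enters {@code k} mapped to {@code Boolean.TRUE}.
+ *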
+ * @param mappedValue the mapped value to use for any additions
+ * @return the set view
+ * @throws NullPointerException if the mappedValue is null
+ */
+ public KeySetView<K,V> keySet(V mappedValue) {
+ if (mappedValue == null)
+ throw new NullPointerException();
+ return new KeySetView<K,V>(this, mappedValue);
+ }
+
+ /* ---------------- Special Nodes -------------- */
+
+ /**
+ * A node inserted at head of bins during transfer operations.
+ */
+ static final class ForwardingNode<K,V> extends Node<K,V> {
+ final Node<K,V>[] nextTable;
+ ForwardingNode(Node<K,V>[] tab) {
+ super(MOVED, null, null, null);
+ this.nextTable = tab;
+ }
+
+ @Override
+ Node<K,V> find(int h, Object k) {
+ // loop to avoid arbitrarily deep recursion on forwarding nodes
+ outer: for (Node<K,V>[] tab = nextTable;;) {
+ Node<K,V> e; int n;
+ if (k == null || tab == null || (n = tab.length) == 0 ||
+ (e = tabAt(tab, (n - 1) & h)) == null)
+ return null;
+ for (;;) {
+ int eh; K ek;
+ if ((eh = e.hash) == h &&
+ ((ek = e.key) == k || (ek != null && k.equals(ek))))
+ return e;
+ if (eh < 0) {
+ if (e instanceof ForwardingNode) {
+ tab = ((ForwardingNode<K,V>)e).nextTable;
+ continue outer;
+ }
+ else
+ return e.find(h, k);
+ }
+ if ((e = e.next) == null)
+ return null;
+ }
+ }
+ }
+ }
+
+ /**
+ * A place-holder node used in computeIfAbsent and compute
+ */
+ static final class ReservationNode<K,V> extends Node<K,V> {
+ ReservationNode() {
+ super(RESERVED, null, null, null);
+ }
+
+ @Override
+ Node<K,V> find(int h, Object k) {
+ return null;
+ }
+ }
+
+ /* ---------------- Table Initialization and Resizing -------------- */
+
+ /**
+ * Initializes table, using the size recorded in sizeCtl.
+ */
+ private final Node<K,V>[] initTable() {
+ Node<K,V>[] tab; int sc;
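+ // sizeCtl < 0 indicates that another thread is initializing or
+ // resizing; a positive value is the initial capacity to use here.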
+ while ((tab = table) == null || tab.length == 0) {
+ if ((sc = sizeCtl) < 0)
+ Thread.yield(); // lost initialization race; just spin
+ else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
+ try {
+ if ((tab = table) == null || tab.length == 0) {
+ int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
+ @SuppressWarnings({"rawtypes","unchecked"})
+ Node<K,V>[] nt = (Node<K,V>[])new Node[n];
+ table = tab = nt;
+ sc = n - (n >>> 2);
+ }
+ } finally {
+ sizeCtl = sc;
+ }
+ break;
+ }
+ }
+ return tab;
+ }
+
+ /**
+ * Adds to count, and if table is too small and not already
+ * resizing, initiates transfer. If already resizing, helps
+ * perform transfer if work is available. Rechecks occupancy
+ * after a transfer to see if another resize is already needed
+ * because resizings are lagging additions.
+ *
+ * @param x the count to add
+ * @param check if <0, don't check resize, if <= 1 only check if uncontended
+ */
+ private final void addCount(long x, int check) {
+ CounterCell[] as; long b, s;
+ if ((as = counterCells) != null ||
+ !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
+ CounterHashCode hc; CounterCell a; long v; int m;
+ boolean uncontended = true;
+ if ((hc = threadCounterHashCode.get()) == null ||
+ as == null || (m = as.length - 1) < 0 ||
+ (a = as[m & hc.code]) == null ||
+ !(uncontended =
+ U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
+ fullAddCount(x, hc, uncontended);
+ return;
+ }
+ if (check <= 1)
+ return;
+ s = sumCount();
+ }
+ if (check >= 0) {
+ Node<K,V>[] tab, nt; int sc;
+ while (s >= (sc = sizeCtl) && (tab = table) != null &&
+ tab.length < MAXIMUM_CAPACITY) {
+ if (sc < 0) {
+ if (sc == -1 || transferIndex <= transferOrigin ||
+ (nt = nextTable) == null)
+ break;
+ if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
+ transfer(tab, nt);
+ }
+ else if (U.compareAndSwapInt(this, SIZECTL, sc, -2))
+ transfer(tab, null);
+ s = sumCount();
+ }
+ }
+ }
+
+ /**
+ * Helps transfer if a resize is in progress.
+ */
+ final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
+ Node<K,V>[] nextTab; int sc;
+ if ((f instanceof ForwardingNode) &&
+ (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
+ if (nextTab == nextTable && tab == table &&
+ transferIndex > transferOrigin && (sc = sizeCtl) < -1 &&
+ U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
+ transfer(tab, nextTab);
+ return nextTab;
+ }
+ return table;
+ }
+
+ /**
+ * Tries to presize table to accommodate the given number of elements.
+ *
+ * @param size number of elements (doesn't need to be perfectly accurate)
+ */
+ private final void tryPresize(int size) {
+ int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
+ tableSizeFor(size + (size >>> 1) + 1);
+ int sc;
+ while ((sc = sizeCtl) >= 0) {
+ Node<K,V>[] tab = table; int n;
+ if (tab == null || (n = tab.length) == 0) {
+ n = (sc > c) ? sc : c;
+ if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
+ try {
+ if (table == tab) {
+ @SuppressWarnings({"rawtypes","unchecked"})
+ Node<K,V>[] nt = (Node<K,V>[])new Node[n];
+ table = nt;
+ sc = n - (n >>> 2);
+ }
+ } finally {
+ sizeCtl = sc;
+ }
+ }
+ }
+ else if (c <= sc || n >= MAXIMUM_CAPACITY)
+ break;
+ else if (tab == table &&
+ U.compareAndSwapInt(this, SIZECTL, sc, -2))
+ transfer(tab, null);
+ }
+ }
+
+ /**
+ * Moves and/or copies the nodes in each bin to new table. See
+ * above for explanation.
+ */
+ private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
+ int n = tab.length, stride;
+ if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
+ stride = MIN_TRANSFER_STRIDE; // subdivide range
+ if (nextTab == null) { // initiating
+ try {
+ @SuppressWarnings({"rawtypes","unchecked"})
+ Node<K,V>[] nt = (Node<K,V>[])new Node[n << 1];
+ nextTab = nt;
+ } catch (Throwable ex) { // try to cope with OOME
+ sizeCtl = Integer.MAX_VALUE;
+ return;
+ }
+ nextTable = nextTab;
+ transferOrigin = n;
+ transferIndex = n;
+ ForwardingNode<K,V> rev = new ForwardingNode<K,V>(tab);
+ for (int k = n; k > 0;) { // progressively reveal ready slots
+ int nextk = (k > stride) ? k - stride : 0;
+ for (int m = nextk; m < k; ++m)
+ nextTab[m] = rev;
+ for (int m = n + nextk; m < n + k; ++m)
+ nextTab[m] = rev;
+ U.putOrderedInt(this, TRANSFERORIGIN, k = nextk);
+ }
+ }
+ int nextn = nextTab.length;
+ ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
+ boolean advance = true;
+ boolean finishing = false; // to ensure sweep before committing nextTab
+ for (int i = 0, bound = 0;;) {
+ int nextIndex, nextBound, fh; Node<K,V> f;
+ while (advance) {
+ if (--i >= bound || finishing)
+ advance = false;
+ else if ((nextIndex = transferIndex) <= transferOrigin) {
+ i = -1;
+ advance = false;
+ }
+ else if (U.compareAndSwapInt
+ (this, TRANSFERINDEX, nextIndex,
+ nextBound = (nextIndex > stride ?
+ nextIndex - stride : 0))) {
+ bound = nextBound;
+ i = nextIndex - 1;
+ advance = false;
+ }
+ }
+ if (i < 0 || i >= n || i + n >= nextn) {
+ if (finishing) {
+ nextTable = null;
+ table = nextTab;
+ sizeCtl = (n << 1) - (n >>> 1);
+ return;
+ }
+ for (int sc;;) {
+ if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) {
+ if (sc != -1)
+ return;
+ finishing = advance = true;
+ i = n; // recheck before commit
+ break;
+ }
+ }
+ }
+ else if ((f = tabAt(tab, i)) == null) {
+ if (casTabAt(tab, i, null, fwd)) {
+ setTabAt(nextTab, i, null);
+ setTabAt(nextTab, i + n, null);
+ advance = true;
+ }
+ }
+ else if ((fh = f.hash) == MOVED)
+ advance = true; // already processed
+ else {
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ Node<K,V> ln, hn;
+ if (fh >= 0) {
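+ // Split the chain by the bit selected by the old length n: nodes
+ // with (hash & n) == 0 go to the "low" list ln (index i), others to
+ // the "high" list hn (index i + n); lastRun marks a reusable tail.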
+ int runBit = fh & n;
+ Node<K,V> lastRun = f;
+ for (Node<K,V> p = f.next; p != null; p = p.next) {
+ int b = p.hash & n;
+ if (b != runBit) {
+ runBit = b;
+ lastRun = p;
+ }
+ }
+ if (runBit == 0) {
+ ln = lastRun;
+ hn = null;
+ }
+ else {
+ hn = lastRun;
+ ln = null;
+ }
+ for (Node<K,V> p = f; p != lastRun; p = p.next) {
+ int ph = p.hash; K pk = p.key; V pv = p.val;
+ if ((ph & n) == 0)
+ ln = new Node<K,V>(ph, pk, pv, ln);
+ else
+ hn = new Node<K,V>(ph, pk, pv, hn);
+ }
+ setTabAt(nextTab, i, ln);
+ setTabAt(nextTab, i + n, hn);
+ setTabAt(tab, i, fwd);
+ advance = true;
+ }
+ else if (f instanceof TreeBin) {
+ TreeBin<K,V> t = (TreeBin<K,V>)f;
+ TreeNode<K,V> lo = null, loTail = null;
+ TreeNode<K,V> hi = null, hiTail = null;
+ int lc = 0, hc = 0;
+ for (Node<K,V> e = t.first; e != null; e = e.next) {
+ int h = e.hash;
+ TreeNode<K,V> p = new TreeNode<K,V>
+ (h, e.key, e.val, null, null);
+ if ((h & n) == 0) {
+ if ((p.prev = loTail) == null)
+ lo = p;
+ else
+ loTail.next = p;
+ loTail = p;
+ ++lc;
+ }
+ else {
+ if ((p.prev = hiTail) == null)
+ hi = p;
+ else
+ hiTail.next = p;
+ hiTail = p;
+ ++hc;
+ }
+ }
+ ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
+ (hc != 0) ? new TreeBin<K,V>(lo) : t;
+ hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
+ (lc != 0) ? new TreeBin<K,V>(hi) : t;
+ setTabAt(nextTab, i, ln);
+ setTabAt(nextTab, i + n, hn);
+ setTabAt(tab, i, fwd);
+ advance = true;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /* ---------------- Conversion from/to TreeBins -------------- */
+
+ /**
+ * Replaces all linked nodes in bin at given index unless table is
+ * too small, in which case resizes instead.
+ */
+ private final void treeifyBin(Node<K,V>[] tab, int index) {
+ Node<K,V> b; int n, sc;
+ if (tab != null) {
+ if ((n = tab.length) < MIN_TREEIFY_CAPACITY) {
+ if (tab == table && (sc = sizeCtl) >= 0 &&
+ U.compareAndSwapInt(this, SIZECTL, sc, -2))
+ transfer(tab, null);
+ }
+ else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
+ synchronized (b) {
+ if (tabAt(tab, index) == b) {
+ TreeNode<K,V> hd = null, tl = null;
+ for (Node<K,V> e = b; e != null; e = e.next) {
+ TreeNode<K,V> p =
+ new TreeNode<K,V>(e.hash, e.key, e.val,
+ null, null);
+ if ((p.prev = tl) == null)
+ hd = p;
+ else
+ tl.next = p;
+ tl = p;
+ }
+ setTabAt(tab, index, new TreeBin<K,V>(hd));
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Returns a list of non-TreeNodes replacing those in the given list.
+ */
+ static <K,V> Node<K,V> untreeify(Node<K,V> b) {
+ Node<K,V> hd = null, tl = null;
+ for (Node<K,V> q = b; q != null; q = q.next) {
+ Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
+ if (tl == null)
+ hd = p;
+ else
+ tl.next = p;
+ tl = p;
+ }
+ return hd;
+ }
+
+ /* ---------------- TreeNodes -------------- */
+
+ /**
+ * Nodes for use in TreeBins
+ */
+ static final class TreeNode<K,V> extends Node<K,V> {
+ TreeNode<K,V> parent; // red-black tree links
+ TreeNode<K,V> left;
+ TreeNode<K,V> right;
+ TreeNode<K,V> prev; // needed to unlink next upon deletion
+ boolean red;
+
+ TreeNode(int hash, K key, V val, Node<K,V> next,
+ TreeNode<K,V> parent) {
+ super(hash, key, val, next);
+ this.parent = parent;
+ }
+
+ @Override
+ Node<K,V> find(int h, Object k) {
+ return findTreeNode(h, k, null);
+ }
+
+ /**
+ * Returns the TreeNode (or null if not found) for the given key
+ * starting at given root.
+ */
+ final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
+ if (k != null) {
+ TreeNode<K,V> p = this;
+ do {
+ int ph, dir; K pk; TreeNode<K,V> q;
+ TreeNode<K,V> pl = p.left, pr = p.right;
+ if ((ph = p.hash) > h)
+ p = pl;
+ else if (ph < h)
+ p = pr;
+ else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
+ return p;
+ else if (pl == null)
+ p = pr;
+ else if (pr == null)
+ p = pl;
+ else if ((kc != null ||
+ (kc = comparableClassFor(k)) != null) &&
+ (dir = compareComparables(kc, k, pk)) != 0)
+ p = (dir < 0) ? pl : pr;
+ else if ((q = pr.findTreeNode(h, k, kc)) != null)
+ return q;
+ else
+ p = pl;
+ } while (p != null);
+ }
+ return null;
+ }
+ }
+
+ /* ---------------- TreeBins -------------- */
+
+ /**
+ * TreeNodes used at the heads of bins. TreeBins do not hold user
+ * keys or values, but instead point to list of TreeNodes and
+ * their root. They also maintain a parasitic read-write lock
+ * forcing writers (who hold bin lock) to wait for readers (who do
+ * not) to complete before tree restructuring operations.
+ */
+ static final class TreeBin<K,V> extends Node<K,V> {
+ TreeNode<K,V> root;
+ volatile TreeNode<K,V> first;
+ volatile Thread waiter;
+ volatile int lockState;
+ // values for lockState
+ static final int WRITER = 1; // set while holding write lock
+ static final int WAITER = 2; // set when waiting for write lock
+ static final int READER = 4; // increment value for setting read lock
+
+ /**
+ * Tie-breaking utility for ordering insertions when equal
+ * hashCodes and non-comparable. We don't require a total
+ * order, just a consistent insertion rule to maintain
+ * equivalence across rebalancings. Tie-breaking further than
+ * necessary simplifies testing a bit.
+ */
+ static int tieBreakOrder(Object a, Object b) {
+ int d;
+ if (a == null || b == null ||
+ (d = a.getClass().getName().
+ compareTo(b.getClass().getName())) == 0)
+ d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
+ -1 : 1);
+ return d;
+ }
+
+ /**
+ * Creates bin with initial set of nodes headed by b.
+ */
+ TreeBin(TreeNode<K,V> b) {
+ super(TREEBIN, null, null, null);
+ this.first = b;
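+ // Build the red-black tree from the linked TreeNodes headed by b,
+ // ordering by hash, then by Comparable if applicable, then by
+ // tieBreakOrder as a last resort.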
+ TreeNode<K,V> r = null;
+ for (TreeNode<K,V> x = b, next; x != null; x = next) {
+ next = (TreeNode<K,V>)x.next;
+ x.left = x.right = null;
+ if (r == null) {
+ x.parent = null;
+ x.red = false;
+ r = x;
+ }
+ else {
+ K k = x.key;
+ int h = x.hash;
+ Class<?> kc = null;
+ for (TreeNode<K,V> p = r;;) {
+ int dir, ph;
+ K pk = p.key;
+ if ((ph = p.hash) > h)
+ dir = -1;
+ else if (ph < h)
+ dir = 1;
+ else if ((kc == null &&
+ (kc = comparableClassFor(k)) == null) ||
+ (dir = compareComparables(kc, k, pk)) == 0)
+ dir = tieBreakOrder(k, pk);
+ TreeNode<K,V> xp = p;
+ if ((p = (dir <= 0) ? p.left : p.right) == null) {
+ x.parent = xp;
+ if (dir <= 0)
+ xp.left = x;
+ else
+ xp.right = x;
+ r = balanceInsertion(r, x);
+ break;
+ }
+ }
+ }
+ }
+ this.root = r;
+ assert checkInvariants(root);
+ }
+
+ /**
+ * Acquires write lock for tree restructuring.
+ */
+ private final void lockRoot() {
+ if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
+ contendedLock(); // offload to separate method
+ }
+
+ /**
+ * Releases write lock for tree restructuring.
+ */
+ private final void unlockRoot() {
+ lockState = 0;
+ }
+
+ /**
+ * Possibly blocks awaiting root lock.
+ */
+ private final void contendedLock() {
+ boolean waiting = false;
+ for (int s;;) {
+ if (((s = lockState) & WRITER) == 0) {
+ if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
+ if (waiting)
+ waiter = null;
+ return;
+ }
+ }
+ else if ((s & WAITER) == 0) {
+ if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
+ waiting = true;
+ waiter = Thread.currentThread();
+ }
+ }
+ else if (waiting)
+ LockSupport.park(this);
+ }
+ }
+
+ /**
+ * Returns matching node or null if none. Tries to search
+ * using tree comparisons from root, but continues linear
+ * search when lock not available.
+ */
+ @Override
+ final Node<K,V> find(int h, Object k) {
+ if (k != null) {
+ for (Node<K,V> e = first; e != null; e = e.next) {
+ int s; K ek;
+ if (((s = lockState) & (WAITER|WRITER)) != 0) {
+ if (e.hash == h &&
+ ((ek = e.key) == k || (ek != null && k.equals(ek))))
+ return e;
+ }
+ else if (U.compareAndSwapInt(this, LOCKSTATE, s,
+ s + READER)) {
+ TreeNode<K,V> r, p;
+ try {
+ p = ((r = root) == null ? null :
+ r.findTreeNode(h, k, null));
+ } finally {
+ Thread w;
+ int ls;
+ do {} while (!U.compareAndSwapInt
+ (this, LOCKSTATE,
+ ls = lockState, ls - READER));
+ if (ls == (READER|WAITER) && (w = waiter) != null)
+ LockSupport.unpark(w);
+ }
+ return p;
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Finds or adds a node.
+ * @return null if added
+ */
+ final TreeNode<K,V> putTreeVal(int h, K k, V v) {
+ Class<?> kc = null;
+ boolean searched = false;
+ for (TreeNode<K,V> p = root;;) {
+ int dir, ph; K pk;
+ if (p == null) {
+ first = root = new TreeNode<K,V>(h, k, v, null, null);
+ break;
+ }
+ else if ((ph = p.hash) > h)
+ dir = -1;
+ else if (ph < h)
+ dir = 1;
+ else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
+ return p;
+ else if ((kc == null &&
+ (kc = comparableClassFor(k)) == null) ||
+ (dir = compareComparables(kc, k, pk)) == 0) {
+ if (!searched) {
+ TreeNode<K,V> q, ch;
+ searched = true;
+ if (((ch = p.left) != null &&
+ (q = ch.findTreeNode(h, k, kc)) != null) ||
+ ((ch = p.right) != null &&
+ (q = ch.findTreeNode(h, k, kc)) != null))
+ return q;
+ }
+ dir = tieBreakOrder(k, pk);
+ }
+
+ TreeNode