2 * Written by Cliff Click and released to the public domain, as explained at
\r
3 * http://creativecommons.org/licenses/publicdomain
\r
6 package org.cliffc.high_scale_lib;
\r
7 import java.io.IOException;
\r
8 import java.io.Serializable;
\r
9 import java.lang.reflect.Field;
\r
11 import java.util.concurrent.ConcurrentMap;
\r
12 import java.util.concurrent.atomic.*;
\r
13 import sun.misc.Unsafe;
\r
16 * A lock-free alternate implementation of {@link java.util.concurrent.ConcurrentHashMap}
\r
17 * with better scaling properties and generally lower costs to mutate the Map.
\r
18 * It provides identical correctness properties as ConcurrentHashMap. All
\r
19 * operations are non-blocking and multi-thread safe, including all update
\r
20 * operations. {@link NonBlockingHashMap} scales substantially better than
\r
21 * {@link java.util.concurrent.ConcurrentHashMap} for high update rates, even with a
\r
22 * large concurrency factor. Scaling is linear up to 768 CPUs on a 768-CPU
\r
23 * Azul box, even with 100% updates or 100% reads or any fraction in-between.
\r
24 * Linear scaling up to all cpus has been observed on a 32-way Sun US2 box,
\r
25 * 32-way Sun Niagara box, 8-way Intel box and a 4-way Power box.
\r
27 * This class obeys the same functional specification as {@link
\r
28 * java.util.Hashtable}, and includes versions of methods corresponding to
\r
29 * each method of <tt>Hashtable</tt>. However, even though all operations are
\r
30 * thread-safe, operations do <em>not</em> entail locking and there is
\r
31 * <em>not</em> any support for locking the entire table in a way that
\r
32 * prevents all access. This class is fully interoperable with
\r
33 * <tt>Hashtable</tt> in programs that rely on its thread safety but not on
\r
34 * its synchronization details.
\r
36 * <p> Operations (including <tt>put</tt>) generally do not block, so may
\r
37 * overlap with other update operations (including other <tt>puts</tt> and
\r
38 * <tt>removes</tt>). Retrievals reflect the results of the most recently
\r
39 * <em>completed</em> update operations holding upon their onset. For
\r
40 * aggregate operations such as <tt>putAll</tt>, concurrent retrievals may
\r
41 * reflect insertion or removal of only some entries. Similarly, Iterators
\r
42 * and Enumerations return elements reflecting the state of the hash table at
\r
43 * some point at or since the creation of the iterator/enumeration. They do
\r
44 * <em>not</em> throw {@link ConcurrentModificationException}. However,
\r
45 * iterators are designed to be used by only one thread at a time.
\r
47 * <p> Very full tables, or tables with high reprobe rates may trigger an
\r
48 * internal resize operation to move into a larger table. Resizing is not
\r
49 * terribly expensive, but it is not free either; during resize operations
\r
50 * table throughput may drop somewhat. All threads that visit the table
\r
51 * during a resize will 'help' the resizing but will still be allowed to
\r
52 * complete their operation before the resize is finished (i.e., a simple
\r
53 * 'get' operation on a million-entry table undergoing resizing will not need
\r
54 * to block until the entire million entries are copied).
\r
56 * <p>This class and its views and iterators implement all of the
\r
57 * <em>optional</em> methods of the {@link Map} and {@link Iterator}
\r
60 * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class
\r
61 * does <em>not</em> allow <tt>null</tt> to be used as a key or value.
\r
65 * @author Cliff Click
\r
66 * @param <TypeK> the type of keys maintained by this map
\r
67 * @param <TypeV> the type of mapped values
\r
70 * @author Prashant Deva - moved hash() function out of get_impl() so it is
\r
71 * not calculated multiple times.
\r
74 public class NonBlockingHashMap<TypeK, TypeV>
\r
75 extends AbstractMap<TypeK, TypeV>
\r
76 implements ConcurrentMap<TypeK, TypeV>, Cloneable, Serializable {
\r
78 private static final long serialVersionUID = 1234123412341234123L;
\r
80 private static final int REPROBE_LIMIT=10; // Too many reprobes then force a table-resize
\r
82 // --- Bits to allow Unsafe access to arrays
\r
83 private static final Unsafe _unsafe = UtilUnsafe.getUnsafe();
\r
84 private static final int _Obase = _unsafe.arrayBaseOffset(Object[].class);
\r
85 private static final int _Oscale = _unsafe.arrayIndexScale(Object[].class);
\r
86 private static long rawIndex(final Object[] ary, final int idx) {
\r
87 assert idx >= 0 && idx < ary.length;
\r
88 return _Obase + idx * _Oscale;
\r
// --- Setup to use Unsafe
// Field offset of '_kvs', used to CAS-install a whole new table array.
private static final long _kvs_offset;
static {                      // <clinit>
  Field f = null;
  try { f = NonBlockingHashMap.class.getDeclaredField("_kvs"); }
  catch( java.lang.NoSuchFieldException e ) { throw new RuntimeException(e); }
  _kvs_offset = _unsafe.objectFieldOffset(f);
}
// Atomically swap the top-level table; returns true if we won the race.
private final boolean CAS_kvs( final Object[] oldkvs, final Object[] newkvs ) {
  return _unsafe.compareAndSwapObject(this, _kvs_offset, oldkvs, newkvs );
}
103 // --- Adding a 'prime' bit onto Values via wrapping with a junk wrapper class
\r
104 private static final class Prime {
\r
106 Prime( Object V ) { _V = V; }
\r
107 static Object unbox( Object V ) { return V instanceof Prime ? ((Prime)V)._V : V; }
\r
110 // --- hash ----------------------------------------------------------------
\r
111 // Helper function to spread lousy hashCodes
\r
112 private static final int hash(final Object key) {
\r
113 int h = key.hashCode(); // The real hashCode call
\r
114 // Spread bits to regularize both segment and index locations,
\r
115 // using variant of single-word Wang/Jenkins hash.
\r
116 h += (h << 15) ^ 0xffffcd7d;
\r
120 h += (h << 2) + (h << 14);
\r
121 return h ^ (h >>> 16);
\r
124 // --- The Hash Table --------------------
\r
125 // Slot 0 is always used for a 'CHM' entry below to hold the interesting
\r
126 // bits of the hash table. Slot 1 holds full hashes as an array of ints.
\r
127 // Slots {2,3}, {4,5}, etc hold {Key,Value} pairs. The entire hash table
\r
128 // can be atomically replaced by CASing the _kvs field.
\r
130 // Why is CHM buried inside the _kvs Object array, instead of the other way
\r
131 // around? The CHM info is used during resize events and updates, but not
\r
132 // during standard 'get' operations. I assume 'get' is much more frequent
\r
133 // than 'put'. 'get' can skip the extra indirection of skipping through the
\r
134 // CHM to reach the _kvs array.
\r
135 private transient Object[] _kvs;
\r
136 private static final CHM chm (Object[] kvs) { return (CHM )kvs[0]; }
\r
137 private static final int[] hashes(Object[] kvs) { return (int[])kvs[1]; }
\r
138 // Number of K,V pairs in the table
\r
139 private static final int len(Object[] kvs) { return (kvs.length-2)>>1; }
\r
141 // Time since last resize
\r
142 private transient long _last_resize_milli;
\r
144 // --- Minimum table size ----------------
\r
145 // Pick size 8 K/V pairs, which turns into (8*2+2)*4+12 = 84 bytes on a
\r
146 // standard 32-bit HotSpot, and (8*2+2)*8+12 = 156 bytes on 64-bit Azul.
\r
147 private static final int MIN_SIZE_LOG=3; //
\r
148 private static final int MIN_SIZE=(1<<MIN_SIZE_LOG); // Must be power of 2
\r
150 // --- Sentinels -------------------------
\r
151 // No-Match-Old - putIfMatch does updates only if it matches the old value,
\r
152 // and NO_MATCH_OLD basically counts as a wildcard match.
\r
153 private static final Object NO_MATCH_OLD = new Object(); // Sentinel
\r
154 // Match-Any-not-null - putIfMatch does updates only if it find a real old
\r
156 private static final Object MATCH_ANY = new Object(); // Sentinel
\r
157 // This K/V pair has been deleted (but the Key slot is forever claimed).
\r
158 // The same Key can be reinserted with a new value later.
\r
159 private static final Object TOMBSTONE = new Object();
\r
160 // Prime'd or box'd version of TOMBSTONE. This K/V pair was deleted, then a
\r
161 // table resize started. The K/V pair has been marked so that no new
\r
162 // updates can happen to the old table (and since the K/V pair was deleted
\r
163 // nothing was copied to the new table).
\r
164 private static final Prime TOMBPRIME = new Prime(TOMBSTONE);
\r
166 // --- key,val -------------------------------------------------------------
\r
167 // Access K,V for a given idx
\r
169 // Note that these are static, so that the caller is forced to read the _kvs
\r
170 // field only once, and share that read across all key/val calls - lest the
\r
171 // _kvs field move out from under us and back-to-back key & val calls refer
\r
172 // to different _kvs arrays.
\r
173 private static final Object key(Object[] kvs,int idx) { return kvs[(idx<<1)+2]; }
\r
174 private static final Object val(Object[] kvs,int idx) { return kvs[(idx<<1)+3]; }
\r
175 private static final boolean CAS_key( Object[] kvs, int idx, Object old, Object key ) {
\r
176 return _unsafe.compareAndSwapObject( kvs, rawIndex(kvs,(idx<<1)+2), old, key );
\r
178 private static final boolean CAS_val( Object[] kvs, int idx, Object old, Object val ) {
\r
179 return _unsafe.compareAndSwapObject( kvs, rawIndex(kvs,(idx<<1)+3), old, val );
\r
183 // --- dump ----------------------------------------------------------------
\r
184 /** Verbose printout of table internals, useful for debugging. */
\r
185 public final void print() {
\r
186 System.out.println("=========");
\r
188 System.out.println("=========");
\r
190 // print the entire state of the table
\r
191 private final void print( Object[] kvs ) {
\r
192 for( int i=0; i<len(kvs); i++ ) {
\r
193 Object K = key(kvs,i);
\r
195 String KS = (K == TOMBSTONE) ? "XXX" : K.toString();
\r
196 Object V = val(kvs,i);
\r
197 Object U = Prime.unbox(V);
\r
198 String p = (V==U) ? "" : "prime_";
\r
199 String US = (U == TOMBSTONE) ? "tombstone" : U.toString();
\r
200 System.out.println(""+i+" ("+KS+","+p+US+")");
\r
203 Object[] newkvs = chm(kvs)._newkvs; // New table, if any
\r
204 if( newkvs != null ) {
\r
205 System.out.println("----");
\r
209 // print only the live values, broken down by the table they are in
\r
210 private final void print2( Object[] kvs) {
\r
211 for( int i=0; i<len(kvs); i++ ) {
\r
212 Object key = key(kvs,i);
\r
213 Object val = val(kvs,i);
\r
214 Object U = Prime.unbox(val);
\r
215 if( key != null && key != TOMBSTONE && // key is sane
\r
216 val != null && U != TOMBSTONE ) { // val is sane
\r
217 String p = (val==U) ? "" : "prime_";
\r
218 System.out.println(""+i+" ("+key+","+p+val+")");
\r
221 Object[] newkvs = chm(kvs)._newkvs; // New table, if any
\r
222 if( newkvs != null ) {
\r
223 System.out.println("----");
\r
228 // Count of reprobes
\r
229 private transient Counter _reprobes = new Counter();
\r
230 /** Get and clear the current count of reprobes. Reprobes happen on key
\r
231 * collisions, and a high reprobe rate may indicate a poor hash function or
\r
232 * weaknesses in the table resizing function.
\r
233 * @return the count of reprobes since the last call to {@link #reprobes}
\r
234 * or since the table was created. */
\r
235 public long reprobes() { long r = _reprobes.get(); _reprobes = new Counter(); return r; }
\r
238 // --- reprobe_limit -----------------------------------------------------
\r
239 // Heuristic to decide if we have reprobed toooo many times. Running over
\r
240 // the reprobe limit on a 'get' call acts as a 'miss'; on a 'put' call it
\r
241 // can trigger a table resize. Several places must have exact agreement on
\r
242 // what the reprobe_limit is, so we share it here.
\r
243 private static final int reprobe_limit( int len ) {
\r
244 return REPROBE_LIMIT + (len>>2);
\r
// --- NonBlockingHashMap --------------------------------------------------

/** Create a new NonBlockingHashMap with default minimum size (currently set
 *  to 8 K/V pairs or roughly 84 bytes on a standard 32-bit JVM). */
public NonBlockingHashMap( ) { this(MIN_SIZE); }

/** Create a new NonBlockingHashMap with initial room for the given number of
 *  elements, thus avoiding internal resizing operations to reach an
 *  appropriate size.  Large numbers here when used with a small count of
 *  elements will sacrifice space for a small amount of time gained.  The
 *  initial size will be rounded up internally to the next larger power of 2. */
public NonBlockingHashMap( final int initial_sz ) { initialize(initial_sz); }
private final void initialize( int initial_sz ) {
  if( initial_sz < 0 ) throw new IllegalArgumentException();
  int i;                      // Convert to next largest power-of-2
  if( initial_sz > 1024*1024 ) initial_sz = 1024*1024; // Cap the requested size
  // Size the table at 4x the requested element count (load-factor headroom),
  // never below MIN_SIZE.
  for( i=MIN_SIZE_LOG; (1<<i) < (initial_sz<<2); i++ ) ;
  // Double size for K,V pairs, add 1 for CHM and 1 for hashes
  _kvs = new Object[((1<<i)<<1)+2];
  _kvs[0] = new CHM(new Counter()); // CHM in slot 0
  _kvs[1] = new int[1<<i];          // Matching hash entries
  _last_resize_milli = System.currentTimeMillis();
}
// Version for subclassed readObject calls, to be called after the defaultReadObject
protected final void initialize() { initialize(MIN_SIZE); }
\r
274 // --- wrappers ------------------------------------------------------------
\r
276 /** Returns the number of key-value mappings in this map.
\r
277 * @return the number of key-value mappings in this map */
\r
279 public int size ( ) { return chm(_kvs).size(); }
\r
280 /** Returns <tt>size() == 0</tt>.
\r
281 * @return <tt>size() == 0</tt> */
\r
283 public boolean isEmpty ( ) { return size() == 0; }
\r
285 /** Tests if the key in the table using the <tt>equals</tt> method.
\r
286 * @return <tt>true</tt> if the key is in the table using the <tt>equals</tt> method
\r
287 * @throws NullPointerException if the specified key is null */
\r
289 public boolean containsKey( Object key ) { return get(key) != null; }
\r
291 /** Legacy method testing if some key maps into the specified value in this
\r
292 * table. This method is identical in functionality to {@link
\r
293 * #containsValue}, and exists solely to ensure full compatibility with
\r
294 * class {@link java.util.Hashtable}, which supported this method prior to
\r
295 * introduction of the Java Collections framework.
\r
296 * @param val a value to search for
\r
297 * @return <tt>true</tt> if this map maps one or more keys to the specified value
\r
298 * @throws NullPointerException if the specified value is null */
\r
299 public boolean contains ( Object val ) { return containsValue(val); }
\r
301 /** Maps the specified key to the specified value in the table. Neither key
\r
302 * nor value can be null.
\r
303 * <p> The value can be retrieved by calling {@link #get} with a key that is
\r
304 * equal to the original key.
\r
305 * @param key key with which the specified value is to be associated
\r
306 * @param val value to be associated with the specified key
\r
307 * @return the previous value associated with <tt>key</tt>, or
\r
308 * <tt>null</tt> if there was no mapping for <tt>key</tt>
\r
309 * @throws NullPointerException if the specified key or value is null */
\r
311 public TypeV put ( TypeK key, TypeV val ) { return putIfMatch( key, val, NO_MATCH_OLD); }
\r
313 /** Atomically, do a {@link #put} if-and-only-if the key is not mapped.
\r
314 * Useful to ensure that only a single mapping for the key exists, even if
\r
315 * many threads are trying to create the mapping in parallel.
\r
316 * @return the previous value associated with the specified key,
\r
317 * or <tt>null</tt> if there was no mapping for the key
\r
318 * @throws NullPointerException if the specified key or value is null */
\r
319 public TypeV putIfAbsent( TypeK key, TypeV val ) { return putIfMatch( key, val, TOMBSTONE ); }
\r
321 /** Removes the key (and its corresponding value) from this map.
\r
322 * This method does nothing if the key is not in the map.
\r
323 * @return the previous value associated with <tt>key</tt>, or
\r
324 * <tt>null</tt> if there was no mapping for <tt>key</tt>
\r
325 * @throws NullPointerException if the specified key is null */
\r
327 public TypeV remove ( Object key ) { return putIfMatch( key,TOMBSTONE, NO_MATCH_OLD); }
\r
329 /** Atomically do a {@link #remove(Object)} if-and-only-if the key is mapped
\r
330 * to a value which is <code>equals</code> to the given value.
\r
331 * @throws NullPointerException if the specified key or value is null */
\r
332 public boolean remove ( Object key,Object val ) { return putIfMatch( key,TOMBSTONE, val ) == val; }
\r
334 /** Atomically do a <code>put(key,val)</code> if-and-only-if the key is
\r
335 * mapped to some value already.
\r
336 * @throws NullPointerException if the specified key or value is null */
\r
337 public TypeV replace ( TypeK key, TypeV val ) { return putIfMatch( key, val,MATCH_ANY ); }
\r
339 /** Atomically do a <code>put(key,newValue)</code> if-and-only-if the key is
\r
340 * mapped a value which is <code>equals</code> to <code>oldValue</code>.
\r
341 * @throws NullPointerException if the specified key or value is null */
\r
342 public boolean replace ( TypeK key, TypeV oldValue, TypeV newValue ) {
\r
343 return putIfMatch( key, newValue, oldValue ) == oldValue;
\r
346 private final TypeV putIfMatch( Object key, Object newVal, Object oldVal ) {
\r
347 if (oldVal == null || newVal == null) throw new NullPointerException();
\r
348 final Object res = putIfMatch( this, _kvs, key, newVal, oldVal );
\r
349 assert !(res instanceof Prime);
\r
350 assert res != null;
\r
351 return res == TOMBSTONE ? null : (TypeV)res;
\r
355 /** Copies all of the mappings from the specified map to this one, replacing
\r
356 * any existing mappings.
\r
357 * @param m mappings to be stored in this map */
\r
359 public void putAll(Map<? extends TypeK, ? extends TypeV> m) {
\r
360 for (Map.Entry<? extends TypeK, ? extends TypeV> e : m.entrySet())
\r
361 put(e.getKey(), e.getValue());
\r
364 /** Removes all of the mappings from this map. */
\r
366 public void clear() { // Smack a new empty table down
\r
367 Object[] newkvs = new NonBlockingHashMap(MIN_SIZE)._kvs;
\r
368 while( !CAS_kvs(_kvs,newkvs) ) // Spin until the clear works
\r
372 /** Returns <tt>true</tt> if this Map maps one or more keys to the specified
\r
373 * value. <em>Note</em>: This method requires a full internal traversal of the
\r
374 * hash table and is much slower than {@link #containsKey}.
\r
375 * @param val value whose presence in this map is to be tested
\r
376 * @return <tt>true</tt> if this map maps one or more keys to the specified value
\r
377 * @throws NullPointerException if the specified value is null */
\r
379 public boolean containsValue( final Object val ) {
\r
380 if( val == null ) throw new NullPointerException();
\r
381 for( TypeV V : values() )
\r
382 if( V == val || V.equals(val) )
\r
387 // This function is supposed to do something for Hashtable, and the JCK
\r
388 // tests hang until it gets called... by somebody ... for some reason,
\r
390 protected void rehash() {
\r
394 * Creates a shallow copy of this hashtable. All the structure of the
\r
395 * hashtable itself is copied, but the keys and values are not cloned.
\r
396 * This is a relatively expensive operation.
\r
398 * @return a clone of the hashtable.
\r
401 public Object clone() {
\r
403 // Must clone, to get the class right; NBHM might have been
\r
404 // extended so it would be wrong to just make a new NBHM.
\r
405 NonBlockingHashMap<TypeK,TypeV> t = (NonBlockingHashMap<TypeK,TypeV>) super.clone();
\r
406 // But I don't have an atomic clone operation - the underlying _kvs
\r
407 // structure is undergoing rapid change. If I just clone the _kvs
\r
408 // field, the CHM in _kvs[0] won't be in sync.
\r
410 // Wipe out the cloned array (it was shallow anyways).
\r
413 for( TypeK K : keySet() ) {
\r
414 final TypeV V = get(K); // Do an official 'get'
\r
418 } catch (CloneNotSupportedException e) {
\r
419 // this shouldn't happen, since we are Cloneable
\r
420 throw new InternalError();
\r
425 * Returns a string representation of this map. The string representation
\r
426 * consists of a list of key-value mappings in the order returned by the
\r
427 * map's <tt>entrySet</tt> view's iterator, enclosed in braces
\r
428 * (<tt>"{}"</tt>). Adjacent mappings are separated by the characters
\r
429 * <tt>", "</tt> (comma and space). Each key-value mapping is rendered as
\r
430 * the key followed by an equals sign (<tt>"="</tt>) followed by the
\r
431 * associated value. Keys and values are converted to strings as by
\r
432 * {@link String#valueOf(Object)}.
\r
434 * @return a string representation of this map
\r
437 public String toString() {
\r
438 Iterator<Entry<TypeK,TypeV>> i = entrySet().iterator();
\r
442 StringBuilder sb = new StringBuilder();
\r
445 Entry<TypeK,TypeV> e = i.next();
\r
446 TypeK key = e.getKey();
\r
447 TypeV value = e.getValue();
\r
448 sb.append(key == this ? "(this Map)" : key);
\r
450 sb.append(value == this ? "(this Map)" : value);
\r
452 return sb.append('}').toString();
\r
457 // --- keyeq ---------------------------------------------------------------
\r
458 // Check for key equality. Try direct pointer compare first, then see if
\r
459 // the hashes are unequal (fast negative test) and finally do the full-on
\r
460 // 'equals' v-call.
\r
461 private static boolean keyeq( Object K, Object key, int[] hashes, int hash, int fullhash ) {
\r
463 K==key || // Either keys match exactly OR
\r
464 // hash exists and matches? hash can be zero during the install of a
\r
465 // new key/value pair.
\r
466 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
\r
467 // Do not call the users' "equals()" call with a Tombstone, as this can
\r
468 // surprise poorly written "equals()" calls that throw exceptions
\r
469 // instead of simply returning false.
\r
470 K != TOMBSTONE && // Do not call users' equals call with a Tombstone
\r
471 // Do the match the hard way - with the users' key being the loop-
\r
472 // invariant "this" pointer. I could have flipped the order of
\r
473 // operands (since equals is commutative), but I'm making mega-morphic
\r
474 // v-calls in a reprobing loop and nailing down the 'this' argument
\r
475 // gives both the JIT and the hardware a chance to prefetch the call target.
\r
476 key.equals(K)); // Finally do the hard match
\r
479 // --- get -----------------------------------------------------------------
\r
480 /** Returns the value to which the specified key is mapped, or {@code null}
\r
481 * if this map contains no mapping for the key.
\r
482 * <p>More formally, if this map contains a mapping from a key {@code k} to
\r
483 * a value {@code v} such that {@code key.equals(k)}, then this method
\r
484 * returns {@code v}; otherwise it returns {@code null}. (There can be at
\r
485 * most one such mapping.)
\r
486 * @throws NullPointerException if the specified key is null */
\r
487 // Never returns a Prime nor a Tombstone.
\r
489 public TypeV get( Object key ) {
\r
490 final int fullhash= hash (key); // throws NullPointerException if key is null
\r
491 final Object V = get_impl(this,_kvs,key,fullhash);
\r
492 assert !(V instanceof Prime); // Never return a Prime
\r
496 private static final Object get_impl( final NonBlockingHashMap topmap, final Object[] kvs, final Object key, final int fullhash ) {
\r
497 final int len = len (kvs); // Count of key/value pairs, reads kvs.length
\r
498 final CHM chm = chm (kvs); // The CHM, for a volatile read below; reads slot 0 of kvs
\r
499 final int[] hashes=hashes(kvs); // The memoized hashes; reads slot 1 of kvs
\r
501 int idx = fullhash & (len-1); // First key hash
\r
503 // Main spin/reprobe loop, looking for a Key hit
\r
506 // Probe table. Each read of 'val' probably misses in cache in a big
\r
507 // table; hopefully the read of 'key' then hits in cache.
\r
508 final Object K = key(kvs,idx); // Get key before volatile read, could be null
\r
509 final Object V = val(kvs,idx); // Get value before volatile read, could be null or Tombstone or Prime
\r
510 if( K == null ) return null; // A clear miss
\r
512 // We need a volatile-read here to preserve happens-before semantics on
\r
513 // newly inserted Keys. If the Key body was written just before inserting
\r
514 // into the table a Key-compare here might read the uninitalized Key body.
\r
515 // Annoyingly this means we have to volatile-read before EACH key compare.
\r
517 // We also need a volatile-read between reading a newly inserted Value
\r
518 // and returning the Value (so the user might end up reading the stale
\r
519 // Value contents). Same problem as with keys - and the one volatile
\r
520 // read covers both.
\r
521 final Object[] newkvs = chm._newkvs; // VOLATILE READ before key compare
\r
524 if( keyeq(K,key,hashes,idx,fullhash) ) {
\r
525 // Key hit! Check for no table-copy-in-progress
\r
526 if( !(V instanceof Prime) ) // No copy?
\r
527 return (V == TOMBSTONE) ? null : V; // Return the value
\r
528 // Key hit - but slot is (possibly partially) copied to the new table.
\r
529 // Finish the copy & retry in the new table.
\r
530 return get_impl(topmap,chm.copy_slot_and_check(topmap,kvs,idx,key),key,fullhash); // Retry in the new table
\r
532 // get and put must have the same key lookup logic! But only 'put'
\r
533 // needs to force a table-resize for a too-long key-reprobe sequence.
\r
534 // Check for too-many-reprobes on get - and flip to the new table.
\r
535 // ???? Why a TOMBSTONE key means no more keys in this table
\r
536 // because a TOMBSTONE key should be null before
\r
537 if( ++reprobe_cnt >= reprobe_limit(len) || // too many probes
\r
538 key == TOMBSTONE ) // found a TOMBSTONE key, means no more keys in this table
\r
539 return newkvs == null ? null : get_impl(topmap,topmap.help_copy(newkvs),key,fullhash); // Retry in the new table
\r
541 idx = (idx+1)&(len-1); // Reprobe by 1! (could now prefetch)
\r
545 // --- putIfMatch ---------------------------------------------------------
\r
546 // Put, Remove, PutIfAbsent, etc. Return the old value. If the returned
\r
547 // value is equal to expVal (or expVal is NO_MATCH_OLD) then the put can be
\r
548 // assumed to work (although might have been immediately overwritten). Only
\r
549 // the path through copy_slot passes in an expected value of null, and
\r
550 // putIfMatch only returns a null if passed in an expected null.
\r
551 private static final Object putIfMatch( final NonBlockingHashMap topmap, final Object[] kvs, final Object key, final Object putval, final Object expVal ) {
\r
552 assert putval != null;
\r
553 assert !(putval instanceof Prime);
\r
554 assert !(expVal instanceof Prime);
\r
555 final int fullhash = hash (key); // throws NullPointerException if key null
\r
556 final int len = len (kvs); // Count of key/value pairs, reads kvs.length
\r
557 final CHM chm = chm (kvs); // Reads kvs[0]
\r
558 final int[] hashes = hashes(kvs); // Reads kvs[1], read before kvs[0]
\r
559 int idx = fullhash & (len-1);
\r
562 // Key-Claim stanza: spin till we can claim a Key (or force a resizing).
\r
564 Object K=null, V=null;
\r
565 Object[] newkvs=null;
\r
566 while( true ) { // Spin till we get a Key slot
\r
567 V = val(kvs,idx); // Get old value (before volatile read below!)
\r
568 K = key(kvs,idx); // Get current key
\r
569 if( K == null ) { // Slot is free?
\r
570 // Found an empty Key slot - which means this Key has never been in
\r
571 // this table. No need to put a Tombstone - the Key is not here!
\r
572 if( putval == TOMBSTONE ) return putval; // Not-now & never-been in this table
\r
573 // Claim the null key-slot
\r
574 if( CAS_key(kvs,idx, null, key ) ) { // Claim slot for Key
\r
575 chm._slots.add(1); // Raise key-slots-used count
\r
576 hashes[idx] = fullhash; // Memoize fullhash
\r
579 // CAS to claim the key-slot failed.
\r
581 // This re-read of the Key points out an annoying short-coming of Java
\r
582 // CAS. Most hardware CAS's report back the existing value - so that
\r
583 // if you fail you have a *witness* - the value which caused the CAS
\r
584 // to fail. The Java API turns this into a boolean destroying the
\r
585 // witness. Re-reading does not recover the witness because another
\r
586 // thread can write over the memory after the CAS. Hence we can be in
\r
587 // the unfortunate situation of having a CAS fail *for cause* but
\r
588 // having that cause removed by a later store. This turns a
\r
589 // non-spurious-failure CAS (such as Azul has) into one that can
\r
590 // apparently spuriously fail - and we avoid apparent spurious failure
\r
591 // by not allowing Keys to ever change.
\r
592 K = key(kvs,idx); // CAS failed, get updated value
\r
593 assert K != null; // If keys[idx] is null, CAS shoulda worked
\r
595 // Key slot was not null, there exists a Key here
\r
597 // We need a volatile-read here to preserve happens-before semantics on
\r
598 // newly inserted Keys. If the Key body was written just before inserting
\r
599 // into the table a Key-compare here might read the uninitalized Key body.
\r
600 // Annoyingly this means we have to volatile-read before EACH key compare.
\r
601 newkvs = chm._newkvs; // VOLATILE READ before key compare
\r
603 if( keyeq(K,key,hashes,idx,fullhash) )
\r
606 // get and put must have the same key lookup logic! Lest 'get' give
\r
607 // up looking too soon.
\r
608 //topmap._reprobes.add(1);
\r
609 if( ++reprobe_cnt >= reprobe_limit(len) || // too many probes or
\r
610 key == TOMBSTONE ) { // found a TOMBSTONE key, means no more keys
\r
611 // We simply must have a new table to do a 'put'. At this point a
\r
612 // 'get' will also go to the new table (if any). We do not need
\r
613 // to claim a key slot (indeed, we cannot find a free one to claim!).
\r
614 newkvs = chm.resize(topmap,kvs);
\r
615 if( expVal != null ) topmap.help_copy(newkvs); // help along an existing copy
\r
616 return putIfMatch(topmap,newkvs,key,putval,expVal);
\r
619 idx = (idx+1)&(len-1); // Reprobe!
\r
620 } // End of spinning till we get a Key slot
\r
623 // Found the proper Key slot, now update the matching Value slot. We
\r
624 // never put a null, so Value slots monotonically move from null to
\r
625 // not-null (deleted Values use Tombstone). Thus if 'V' is null we
\r
626 // fail this fast cutout and fall into the check for table-full.
\r
627 if( putval == V ) return V; // Fast cutout for no-change
\r
629 // See if we want to move to a new table (to avoid high average re-probe
\r
630 // counts). We only check on the initial set of a Value from null to
\r
631 // not-null (i.e., once per key-insert). Of course we got a 'free' check
\r
632 // of newkvs once per key-compare (not really free, but paid-for by the
\r
633 // time we get here).
\r
634 if( newkvs == null && // New table-copy already spotted?
\r
635 // Once per fresh key-insert check the hard way
\r
636 ((V == null && chm.tableFull(reprobe_cnt,len)) ||
\r
637 // Or we found a Prime, but the JMM allowed reordering such that we
\r
638 // did not spot the new table (very rare race here: the writing
\r
639 // thread did a CAS of _newkvs then a store of a Prime. This thread
\r
640 // reads the Prime, then reads _newkvs - but the read of Prime was so
\r
641 // delayed (or the read of _newkvs was so accelerated) that they
\r
642 // swapped and we still read a null _newkvs. The resize call below
\r
643 // will do a CAS on _newkvs forcing the read.
\r
644 V instanceof Prime) )
\r
645 newkvs = chm.resize(topmap,kvs); // Force the new table copy to start
\r
646 // See if we are moving to a new table.
\r
647 // If so, copy our slot and retry in the new table.
\r
648 if( newkvs != null )
\r
649 return putIfMatch(topmap,chm.copy_slot_and_check(topmap,kvs,idx,expVal),key,putval,expVal);
\r
652 // We are finally prepared to update the existing table
\r
654 assert !(V instanceof Prime);
\r
656 // Must match old, and we do not? Then bail out now. Note that either V
\r
657 // or expVal might be TOMBSTONE. Also V can be null, if we've never
\r
658 // inserted a value before. expVal can be null if we are called from
\r
661 if( expVal != NO_MATCH_OLD && // Do we care about expected-Value at all?
\r
662 V != expVal && // No instant match already?
\r
663 (expVal != MATCH_ANY || V == TOMBSTONE || V == null) &&
\r
664 !(V==null && expVal == TOMBSTONE) && // Match on null/TOMBSTONE combo
\r
665 (expVal == null || !expVal.equals(V)) ) // Expensive equals check at the last
\r
666 return V; // Do not update!
\r
668 // Actually change the Value in the Key,Value pair
\r
669 if( CAS_val(kvs, idx, V, putval ) ) {
\r
670 // CAS succeeded - we did the update!
\r
671 // Both normal put's and table-copy calls putIfMatch, but table-copy
\r
672 // does not (effectively) increase the number of live k/v pairs.
\r
673 if( expVal != null ) {
\r
674 // Adjust sizes - a striped counter
\r
675 if( (V == null || V == TOMBSTONE) && putval != TOMBSTONE ) chm._size.add( 1);
\r
676 if( !(V == null || V == TOMBSTONE) && putval == TOMBSTONE ) chm._size.add(-1);
\r
678 return (V==null && expVal!=null) ? TOMBSTONE : V;
\r
681 V = val(kvs,idx); // Get new value
\r
682 // If a Prime'd value got installed, we need to re-run the put on the
\r
683 // new table. Otherwise we lost the CAS to another racing put.
\r
684 // Simply retry from the start.
\r
685 if( V instanceof Prime )
\r
686 return putIfMatch(topmap,chm.copy_slot_and_check(topmap,kvs,idx,expVal),key,putval,expVal);
\r
  // --- help_copy ---------------------------------------------------------
  // Help along an existing resize operation.  This is just a fast cut-out
  // wrapper, to encourage inlining for the fast no-copy-in-progress case.  We
  // always help the top-most table copy, even if there are nested table
  // copies in progress.
  // @param helper passed through and returned unchanged, as a convenience so
  //        callers can chain this into a return expression.
  private final Object[] help_copy( Object[] helper ) {
    // Read the top-level KVS only once.  We'll try to help this copy along,
    // even if it gets promoted out from under us (i.e., the copy completes
    // and another KVS becomes the top-level copy).
    Object[] topkvs = _kvs;
    CHM topchm = chm(topkvs);
    if( topchm._newkvs == null ) return helper; // No copy in-progress
    // A copy IS in progress: do a bounded chunk of copy work (copy_all==false).
    topchm.help_copy_impl(this,topkvs,false);
    // NOTE(review): the trailing 'return helper;' and closing brace are not
    // visible in this extract - confirm against the full source.
\r
  // --- CHM -----------------------------------------------------------------
  // The control structure for the NonBlockingHashMap.  One CHM lives in slot 0
  // of each K/V array and holds the per-table bookkeeping (size, used slots,
  // and the in-progress-resize state).
  private static final class CHM<TypeK,TypeV> {
    // Size in active K,V pairs (striped counter, shared across table generations)
    private final Counter _size;
    // Approximate count of live K,V pairs; exact only when the map is quiescent.
    public int size () { return (int)_size.get(); }

    // These next 2 fields are used in the resizing heuristics, to judge when
    // it is time to resize or copy the table.  Slots is a count of used-up
    // key slots, and when it nears a large fraction of the table we probably
    // end up reprobing too much.  Last-resize-milli is the time since the
    // last resize; if we are running back-to-back resizes without growing
    // (because there are only a few live keys but many slots full of dead
    // keys) then we need a larger table to cut down on the churn.

    // Count of used slots, to tell when table is full of dead unusable slots
    private final Counter _slots;
    // Approximate count of claimed key slots (live keys plus dead/tombstoned ones).
    public int slots() { return (int)_slots.get(); }
\r
728 // New mappings, used during resizing.
\r
729 // The 'new KVs' array - created during a resize operation. This
\r
730 // represents the new table being copied from the old one. It's the
\r
731 // volatile variable that is read as we cross from one table to the next,
\r
732 // to get the required memory orderings. It monotonically transits from
\r
733 // null to set (once).
\r
734 volatile Object[] _newkvs;
\r
735 private final AtomicReferenceFieldUpdater<CHM,Object[]> _newkvsUpdater =
\r
736 AtomicReferenceFieldUpdater.newUpdater(CHM.class,Object[].class, "_newkvs");
\r
737 // Set the _next field if we can.
\r
738 boolean CAS_newkvs( Object[] newkvs ) {
\r
739 while( _newkvs == null )
\r
740 if( _newkvsUpdater.compareAndSet(this,null,newkvs) )
\r
744 // Sometimes many threads race to create a new very large table. Only 1
\r
745 // wins the race, but the losers all allocate a junk large table with
\r
746 // hefty allocation costs. Attempt to control the overkill here by
\r
747 // throttling attempts to create a new table. I cannot really block here
\r
748 // (lest I lose the non-blocking property) but late-arriving threads can
\r
749 // give the initial resizing thread a little time to allocate the initial
\r
750 // new table. The Right Long Term Fix here is to use array-lets and
\r
751 // incrementally create the new very large array. In C I'd make the array
\r
752 // with malloc (which would mmap under the hood) which would only eat
\r
753 // virtual-address and not real memory - and after Somebody wins then we
\r
754 // could in parallel initialize the array. Java does not allow
\r
755 // un-initialized array creation (especially of ref arrays!).
\r
756 volatile long _resizers; // count of threads attempting an initial resize
\r
757 private static final AtomicLongFieldUpdater<CHM> _resizerUpdater =
\r
758 AtomicLongFieldUpdater.newUpdater(CHM.class, "_resizers");
\r
761 // Simple constructor
\r
762 CHM( Counter size ) {
\r
764 _slots= new Counter();
\r
767 // --- tableFull ---------------------------------------------------------
\r
768 // Heuristic to decide if this table is too full, and we should start a
\r
769 // new table. Note that if a 'get' call has reprobed too many times and
\r
770 // decided the table must be full, then always the estimate_sum must be
\r
771 // high and we must report the table is full. If we do not, then we might
\r
772 // end up deciding that the table is not full and inserting into the
\r
773 // current table, while a 'get' has decided the same key cannot be in this
\r
774 // table because of too many reprobes. The invariant is:
\r
775 // slots.estimate_sum >= max_reprobe_cnt >= reprobe_limit(len)
\r
776 private final boolean tableFull( int reprobe_cnt, int len ) {
\r
778 // Do the cheap check first: we allow some number of reprobes always
\r
779 reprobe_cnt >= REPROBE_LIMIT &&
\r
780 // More expensive check: see if the table is > 1/4 full.
\r
781 _slots.estimate_get() >= reprobe_limit(len);
\r
    // --- resize ------------------------------------------------------------
    // Resizing after too many probes.  "How Big???" heuristics are here.
    // Callers (not this routine) will 'help_copy' any in-progress copy.
    // Since this routine has a fast cutout for copy-already-started, callers
    // MUST 'help_copy' lest we have a path which forever runs through
    // 'resize' only to discover a copy-in-progress which never progresses.
    // Returns the new (not-yet-copied-into) K/V array.
    private final Object[] resize( NonBlockingHashMap topmap, Object[] kvs) {
      assert chm(kvs) == this;

      // Check for resize already in progress, probably triggered by another thread
      Object[] newkvs = _newkvs; // VOLATILE READ
      if( newkvs != null )       // See if resize is already in progress
        return newkvs;           // Use the new table already

      // No copy in-progress, so start one.  First up: compute new table size.
      int oldlen = len(kvs);    // Old count of K,V pairs allowed
      int sz = size();          // Get current table count of active K,V pairs
      int newsz = sz;           // First size estimate

      // Heuristic to determine new size.  We expect plenty of dead-slots-with-keys
      // and we need some decent padding to avoid endless reprobing.
      if( sz >= (oldlen>>2) ) { // If we are >25% full of keys then...
        newsz = oldlen<<1;      // Double size
        if( sz >= (oldlen>>1) ) // If we are >50% full of keys then...
          newsz = oldlen<<2;    // Double double size
      // NOTE(review): the brace closing this 'if' block is not visible in
      // this extract - confirm against the full source.

      // This heuristic in the next 2 lines leads to a much denser table
      // with a higher reprobe rate
      //if( sz >= (oldlen>>1) ) // If we are >50% full of keys then...
      //  newsz = oldlen<<1;    // Double size

      // Last (re)size operation was very recent?  Then double again; slows
      // down resize operations for tables subject to a high key churn rate.
      long tm = System.currentTimeMillis();
      // NOTE(review): 'q' is assigned below but its declaration (e.g.
      // 'long q=0;') is not visible in this extract.
      if( newsz <= oldlen &&    // New table would shrink or hold steady?
          tm <= topmap._last_resize_milli+10000 && // Recent resize (less than 10 sec ago)
          (q=_slots.estimate_get()) >= (sz<<1) ) // 1/2 of keys are dead?
        newsz = oldlen<<1;      // Double the existing size

      // Do not shrink, ever
      if( newsz < oldlen ) newsz = oldlen;

      // Convert to power-of-2
      // NOTE(review): the declaration of 'log2' (e.g. 'int log2;') is not
      // visible in this extract.
      for( log2=MIN_SIZE_LOG; (1<<log2) < newsz; log2++ ) ; // Compute log2 of size

      // Now limit the number of threads actually allocating memory to a
      // handful - lest we have 750 threads all trying to allocate a giant
      long r = _resizers;
      // Claim one resize attempt; a CAS failure means some other thread also
      // bumped the count.  NOTE(review): the re-read of '_resizers' that
      // should form this loop's body is not visible in this extract.
      while( !_resizerUpdater.compareAndSet(this,r,r+1) )

      // Size calculation: 2 words (K+V) per table entry, plus a handful.  We
      // guess at 32-bit pointers; 64-bit pointers screws up the size calc by
      // 2x but does not screw up the heuristic very much.
      int megs = ((((1<<log2)<<1)+4)<<3/*word to bytes*/)>>20/*megs*/;
      if( r >= 2 && megs > 0 ) { // Already 2 guys trying; wait and see
        newkvs = _newkvs;       // Between dorking around, another thread did it
        if( newkvs != null )    // See if resize is already in progress
          return newkvs;        // Use the new table already
        // TODO - use a wait with timeout, so we'll wakeup as soon as the new table
        // is ready, or after the timeout in any case.
        //synchronized( this ) { wait(8*megs); } // Timeout - we always wakeup
        // For now, sleep a tad and see if the 2 guys already trying to make
        // the table actually get around to making it happen.
        try { Thread.sleep(8*megs); } catch( Exception e ) { }

      // Last check, since the 'new' below is expensive and there is a chance
      // that another thread slipped in a new table while we ran the heuristic.
      // NOTE(review): the re-read 'newkvs = _newkvs;' expected before this
      // test is not visible in this extract.
      if( newkvs != null )      // See if resize is already in progress
        return newkvs;          // Use the new table already

      // Double size for K,V pairs, add 1 for CHM
      newkvs = new Object[((1<<log2)<<1)+2]; // This can get expensive for big arrays
      newkvs[0] = new CHM(_size); // CHM in slot 0
      newkvs[1] = new int[1<<log2]; // hashes in slot 1

      // Another check after the slow allocation
      if( _newkvs != null )     // See if resize is already in progress
        return _newkvs;         // Use the new table already

      // The new table must be CAS'd in so only 1 winner amongst duplicate
      // racing resizing threads.  Extra CHM's will be GC'd.
      if( CAS_newkvs( newkvs ) ) { // NOW a resize-is-in-progress!
        //notifyAll();            // Wake up any sleepers
        //long nano = System.nanoTime();
        //System.out.println(" "+nano+" Resize from "+oldlen+" to "+(1<<log2)+" and had "+(_resizers-1)+" extras" );
        //if( System.out != null ) System.out.print("["+log2);
        topmap.rehash();        // Call for Hashtable's benefit
      } else                    // CAS failed?
        newkvs = _newkvs;       // Reread new table
      // NOTE(review): the trailing 'return newkvs;' and closing brace are not
      // visible in this extract.
\r
    // The next part of the table to copy.  It monotonically transits from zero
    // to _kvs.length.  Visitors to the table can claim 'work chunks' by
    // CAS'ing this field up, then copying the indicated indices from the old
    // table to the new table.  Workers are not required to finish any chunk;
    // the counter simply wraps and work is copied duplicately until somebody
    // somewhere completes the count.
    // Units: old-table slot indices (may exceed len(oldkvs) when wrapping).
    volatile long _copyIdx = 0;
    static private final AtomicLongFieldUpdater<CHM> _copyIdxUpdater =
      AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyIdx");

    // Work-done reporting.  Used to efficiently signal when we can move to
    // the new table.  From 0 to len(oldkvs) refers to copying from the old
    // table to the new.
    // Units: count of old-table slots whose copy has been CONFIRMED complete.
    volatile long _copyDone= 0;
    static private final AtomicLongFieldUpdater<CHM> _copyDoneUpdater =
      AtomicLongFieldUpdater.newUpdater(CHM.class, "_copyDone");
\r
    // --- help_copy_impl ----------------------------------------------------
    // Help along an existing resize operation.  We hope its the top-level
    // copy (it was when we started) but this CHM might have been promoted out
    // of the top position.
    // @param copy_all if true, do not return until the entire old table has
    //        been copied; if false, do (at most) one MIN_COPY_WORK chunk.
    private final void help_copy_impl( NonBlockingHashMap topmap, Object[] oldkvs, boolean copy_all ) {
      assert chm(oldkvs) == this;
      Object[] newkvs = _newkvs;
      assert newkvs != null;    // Already checked by caller
      int oldlen = len(oldkvs); // Total amount to copy
      final int MIN_COPY_WORK = Math.min(oldlen,1024); // Limit per-thread work

      int panic_start = -1;
      int copyidx=-9999;            // Fool javac to think it's initialized
      while( _copyDone < oldlen ) { // Still needing to copy?
        // Carve out a chunk of work.  The counter wraps around so every
        // thread eventually tries to copy every slot repeatedly.

        // We "panic" if we have tried TWICE to copy every slot - and it still
        // has not happened.  i.e., twice some thread somewhere claimed they
        // would copy 'slot X' (by bumping _copyIdx) but they never claimed to
        // have finished (by bumping _copyDone).  Our choices become limited:
        // we can wait for the work-claimers to finish (and become a blocking
        // algorithm) or do the copy work ourselves.  Tiny tables with huge
        // thread counts trying to copy the table often 'panic'.
        if( panic_start == -1 ) { // No panic?
          copyidx = (int)_copyIdx;
          while( copyidx < (oldlen<<1) && // 'panic' check
                 !_copyIdxUpdater.compareAndSet(this,copyidx,copyidx+MIN_COPY_WORK) )
            copyidx = (int)_copyIdx;     // Re-read
          if( !(copyidx < (oldlen<<1)) ) // Panic!
            panic_start = copyidx;       // Record where we started to panic-copy

        // We now know what to copy.  Try to copy.
        // NOTE(review): the brace closing the 'if' above and the declaration
        // of 'workdone' (e.g. 'int workdone = 0;') are not visible in this
        // extract - confirm against the full source.
        for( int i=0; i<MIN_COPY_WORK; i++ )
          if( copy_slot(topmap,(copyidx+i)&(oldlen-1),oldkvs,newkvs) ) // Made an oldtable slot go dead?
            workdone++;         // Yes!
        if( workdone > 0 )      // Report work-done occasionally
          copy_check_and_promote( topmap, oldkvs, workdone );// See if we can promote
        //for( int i=0; i<MIN_COPY_WORK; i++ )
        //  if( copy_slot(topmap,(copyidx+i)&(oldlen-1),oldkvs,newkvs) ) // Made an oldtable slot go dead?
        //    copy_check_and_promote( topmap, oldkvs, 1 );// See if we can promote

        copyidx += MIN_COPY_WORK;
        // Uncomment these next 2 lines to turn on incremental table-copy.
        // Otherwise this thread continues to copy until it is all done.
        if( !copy_all && panic_start == -1 ) // No panic?
          return;               // Then done copying after doing MIN_COPY_WORK

      // Extra promotion check, in case another thread finished all copying
      // then got stalled before promoting.
      copy_check_and_promote( topmap, oldkvs, 0 );// See if we can promote
\r
    // --- copy_slot_and_check -----------------------------------------------
    // Copy slot 'idx' from the old table to the new table.  If this thread
    // confirmed the copy, update the counters and check for promotion.
    //
    // Returns the result of reading the volatile _newkvs, mostly as a
    // convenience to callers.  We come here with 1-shot copy requests
    // typically because the caller has found a Prime, and has not yet read
    // the _newkvs volatile - which must have changed from null-to-not-null
    // before any Prime appears.  So the caller needs to read the _newkvs
    // field to retry his operation in the new table, but probably has not
    // read it yet.
    // @param should_help null when called from a copy helper (do not recurse
    //        into help_copy); non-null to generically help the copy along.
    private final Object[] copy_slot_and_check( NonBlockingHashMap topmap, Object[] oldkvs, int idx, Object should_help ) {
      assert chm(oldkvs) == this;
      Object[] newkvs = _newkvs; // VOLATILE READ
      // We're only here because the caller saw a Prime, which implies a
      // table-copy is in progress.
      assert newkvs != null;
      if( copy_slot(topmap,idx,oldkvs,_newkvs) )   // Copy the desired slot
        copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
      // Generically help along any copy (except if called recursively from a helper)
      return (should_help == null) ? newkvs : topmap.help_copy(newkvs);
\r
    // --- copy_check_and_promote --------------------------------------------
    // Fold 'workdone' freshly-confirmed slot copies into _copyDone and, when
    // the whole old table is copied AND it is still the top-level table,
    // CAS-promote the new table into topmap._kvs.
    private final void copy_check_and_promote( NonBlockingHashMap topmap, Object[] oldkvs, int workdone ) {
      assert chm(oldkvs) == this;
      int oldlen = len(oldkvs);
      // We made a slot unusable and so did some of the needed copy work
      long copyDone = _copyDone;
      assert (copyDone+workdone) <= oldlen;
      if( workdone > 0 ) {
        while( !_copyDoneUpdater.compareAndSet(this,copyDone,copyDone+workdone) ) {
          copyDone = _copyDone; // Reload, retry
          assert (copyDone+workdone) <= oldlen;
        // NOTE(review): the braces closing this retry loop and the enclosing
        // 'if' are not visible in this extract.
        //if( (10*copyDone/oldlen) != (10*(copyDone+workdone)/oldlen) )
        //System.out.print(" "+(copyDone+workdone)*100/oldlen+"%"+"_"+(_copyIdx*100/oldlen)+"%");

      // Check for copy being ALL done, and promote.  Note that we might have
      // nested in-progress copies and manage to finish a nested copy before
      // finishing the top-level copy.  We only promote top-level copies.
      if( copyDone+workdone == oldlen && // Ready to promote this table?
          topmap._kvs == oldkvs && // Looking at the top-level table?
          // Attempt to promote
          topmap.CAS_kvs(oldkvs,_newkvs) ) {
        topmap._last_resize_milli = System.currentTimeMillis(); // Record resize time for next check
        //long nano = System.nanoTime();
        //System.out.println(" "+nano+" Promote table to "+len(_newkvs));
        //if( System.out != null ) System.out.print("]");
\r
    // --- copy_slot ---------------------------------------------------------
    // Copy one K/V pair from oldkvs[i] to newkvs.  Returns true if we can
    // confirm that the new table guaranteed has a value for this old-table
    // slot.  We need an accurate confirmed-copy count so that we know when we
    // can promote (if we promote the new table too soon, other threads may
    // 'miss' on values not-yet-copied from the old table).  We don't allow
    // any direct updates on the new table, unless they first happened to the
    // old table - so that any transition in the new table from null to
    // not-null must have been from a copy_slot (or other old-table overwrite)
    // and not from a thread directly writing in the new table.  Thus we can
    // count null-to-not-null transitions in the new table.
    private boolean copy_slot( NonBlockingHashMap topmap, int idx, Object[] oldkvs, Object[] newkvs ) {
      // Blindly set the key slot from null to TOMBSTONE, to eagerly stop
      // fresh put's from inserting new values in the old table when the old
      // table is mid-resize.  We don't need to act on the results here,
      // because our correctness stems from box'ing the Value field.  Slamming
      // the Key field is a minor speed optimization.
      // NOTE(review): the declaration of 'key' (e.g. 'Object key;') is not
      // visible in this extract.
      while( (key=key(oldkvs,idx)) == null )
        CAS_key(oldkvs,idx, null, TOMBSTONE);

      // Prevent new values from appearing in the old table.
      // Box what we see in the old table, to prevent further updates.
      Object oldval = val(oldkvs,idx); // Read OLD table
      while( !(oldval instanceof Prime) ) {
        final Prime box = (oldval == null || oldval == TOMBSTONE) ? TOMBPRIME : new Prime(oldval);
        if( CAS_val(oldkvs,idx,oldval,box) ) { // CAS down a box'd version of oldval
          // If we made the Value slot hold a TOMBPRIME, then we both
          // prevented further updates here but also the (absent)
          // oldval is vacuously available in the new table.  We
          // return with true here: any thread looking for a value for
          // this key can correctly go straight to the new table and
          // skip looking in the old table.
          if( box == TOMBPRIME )
          // NOTE(review): the 'return true;' expected as the body of the 'if'
          // above is not visible in this extract.
          // Otherwise we boxed something, but it still needs to be
          // copied into the new table.
          oldval = box;         // Record updated oldval
          break;                // Break loop; oldval is now boxed by us
        oldval = val(oldkvs,idx); // Else try, try again
      if( oldval == TOMBPRIME ) return false; // Copy already complete here!

      // Copy the value into the new table, but only if we overwrite a null.
      // If another value is already in the new table, then somebody else
      // wrote something there and that write is happens-after any value that
      // appears in the old table.  If putIfMatch does not find a null in the
      // new table - somebody else should have recorded the null-not_null
      // transition in this copy.
      Object old_unboxed = ((Prime)oldval)._V;
      assert old_unboxed != TOMBSTONE;
      boolean copied_into_new = (putIfMatch(topmap, newkvs, key, old_unboxed, null) == null);

      // Finally, now that any old value is exposed in the new table, we can
      // forever hide the old-table value by slapping a TOMBPRIME down.  This
      // will stop other threads from uselessly attempting to copy this slot
      // (i.e., it's a speed optimization not a correctness issue).
      while( !CAS_val(oldkvs,idx,oldval,TOMBPRIME) )
        oldval = val(oldkvs,idx);

      return copied_into_new;
    } // end copy_slot
\r
  // --- Snapshot ------------------------------------------------------------
  // The main class for iterating over the NBHM.  It "snapshots" a clean
  // view of the K/V array by first helping any in-progress table copy to
  // completion, then walking the stable top-level array.
  private class SnapshotV implements Iterator<TypeV>, Enumeration<TypeV> {
    final Object[] _sskvs;      // The stable K/V array this iterator walks
    public SnapshotV() {
      while( true ) {           // Verify no table-copy-in-progress
        Object[] topkvs = _kvs;
        CHM topchm = chm(topkvs);
        if( topchm._newkvs == null ) { // No table-copy-in-progress
          // The "linearization point" for the iteration.  Every key in this
          // table will be visited, but keys added later might be skipped or
          // even be added to a following table (also not iterated over).
          // NOTE(review): the '_sskvs = topkvs; break;' statements and the
          // matching close-braces expected here are not visible in this
          // extract - confirm against the full source.

        // Table copy in-progress - so we cannot get a clean iteration.  We
        // must help finish the table copy before we can start iterating.
        topchm.help_copy_impl(NonBlockingHashMap.this,topkvs,true);

      // Warm-up the iterator
    int length() { return len(_sskvs); } // Capacity of the snapshot array
    Object key(int idx) { return NonBlockingHashMap.key(_sskvs,idx); } // Key at slot 'idx'
    private int _idx;              // Varies from 0-keys.length
    private Object _nextK, _prevK; // Last 2 keys found
    private TypeV _nextV, _prevV;  // Last 2 values found
    public boolean hasNext() { return _nextV != null; }
    public TypeV next() {
      // 'next' actually knows what the next value will be - it had to
      // figure that out last go-around lest 'hasNext' report true and
      // some other thread deleted the last value.  Instead, 'next'
      // spends all its effort finding the key that comes after the
      // previous key (whose value is returned here).
      if( _idx != 0 && _nextV == null ) throw new NoSuchElementException();
      _prevK = _nextK;          // This will become the previous key
      _prevV = _nextV;          // This will become the previous value
      _nextV = null;            // We have no more next-key
      // Attempt to set <_nextK,_nextV> to the next K,V pair.
      // _nextV is the trigger: stop searching when it is != null
      while( _idx<length() ) {  // Scan array
        _nextK = key(_idx++);   // Get a key that definitely is in the set (for the moment!)
        if( _nextK != null &&   // Found something?
            _nextK != TOMBSTONE &&
            (_nextV=get(_nextK)) != null )
          break;                // Got it!  _nextK is a valid Key
      }                         // Else keep scanning
      return _prevV;            // Return current value.
    public void remove() {
      // Remove the previously-returned key by matching its snapshot value;
      // a concurrent overwrite of that value makes this remove a no-op.
      if( _prevV == null ) throw new IllegalStateException();
      putIfMatch( NonBlockingHashMap.this, _sskvs, _prevK, TOMBSTONE, _prevV );

    public TypeV nextElement() { return next(); }
    public boolean hasMoreElements() { return hasNext(); }
\r
1136 /** Returns an enumeration of the values in this table.
\r
1137 * @return an enumeration of the values in this table
\r
1138 * @see #values() */
\r
1139 public Enumeration<TypeV> elements() { return new SnapshotV(); }
\r
  // --- values --------------------------------------------------------------
  /** Returns a {@link Collection} view of the values contained in this map.
   * The collection is backed by the map, so changes to the map are reflected
   * in the collection, and vice-versa.  The collection supports element
   * removal, which removes the corresponding mapping from this map, via the
   * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
   * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
   * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
   *
   * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator that
   * will never throw {@link ConcurrentModificationException}, and guarantees
   * to traverse elements as they existed upon construction of the iterator,
   * and may (but is not guaranteed to) reflect any modifications subsequent
   * to construction. */
  public Collection<TypeV> values() {
    // Thin adapter view: all operations delegate to the enclosing map.
    return new AbstractCollection<TypeV>() {
      @Override public void clear ( ) { NonBlockingHashMap.this.clear ( ); }
      @Override public int size ( ) { return NonBlockingHashMap.this.size ( ); }
      @Override public boolean contains( Object v ) { return NonBlockingHashMap.this.containsValue(v); }
      @Override public Iterator<TypeV> iterator() { return new SnapshotV(); }
      // NOTE(review): the closing '};' and '}' for this anonymous class and
      // method are not visible in this extract.
\r
  // --- keySet --------------------------------------------------------------
  // Key iterator/enumeration: piggy-backs on SnapshotV and reports the key
  // (_prevK) paired with each value SnapshotV finds.
  private class SnapshotK implements Iterator<TypeK>, Enumeration<TypeK> {
    final SnapshotV _ss;        // Underlying value-snapshot doing the real work
    public SnapshotK() { _ss = new SnapshotV(); }
    public void remove() { _ss.remove(); }
    public TypeK next() { _ss.next(); return (TypeK)_ss._prevK; }
    public boolean hasNext() { return _ss.hasNext(); }
    public TypeK nextElement() { return next(); }
    public boolean hasMoreElements() { return hasNext(); }
\r
1176 /** Returns an enumeration of the keys in this table.
\r
1177 * @return an enumeration of the keys in this table
\r
1178 * @see #keySet() */
\r
1179 public Enumeration<TypeK> keys() { return new SnapshotK(); }
\r
  /** Returns a {@link Set} view of the keys contained in this map.  The set
   * is backed by the map, so changes to the map are reflected in the set,
   * and vice-versa.  The set supports element removal, which removes the
   * corresponding mapping from this map, via the <tt>Iterator.remove</tt>,
   * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and
   * <tt>clear</tt> operations.  It does not support the <tt>add</tt> or
   * <tt>addAll</tt> operations.
   *
   * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator that
   * will never throw {@link ConcurrentModificationException}, and guarantees
   * to traverse elements as they existed upon construction of the iterator,
   * and may (but is not guaranteed to) reflect any modifications subsequent
   * to construction. */
  public Set<TypeK> keySet() {
    // Thin adapter view: all operations delegate to the enclosing map.
    return new AbstractSet<TypeK> () {
      @Override public void clear ( ) { NonBlockingHashMap.this.clear ( ); }
      @Override public int size ( ) { return NonBlockingHashMap.this.size ( ); }
      @Override public boolean contains( Object k ) { return NonBlockingHashMap.this.containsKey(k); }
      @Override public boolean remove ( Object k ) { return NonBlockingHashMap.this.remove (k) != null; }
      @Override public Iterator<TypeK> iterator() { return new SnapshotK(); }
      // NOTE(review): the closing '};' and '}' for this anonymous class and
      // method are not visible in this extract.
\r
  // --- entrySet ------------------------------------------------------------
  // Warning: Each call to 'next' in this iterator constructs a new NBHMEntry.
  private class NBHMEntry extends AbstractEntry<TypeK,TypeV> {
    NBHMEntry( final TypeK k, final TypeV v ) { super(k,v); }
    // Writes through to the enclosing map with a plain 'put'.
    // Returns the value previously mapped (per the Map.Entry contract).
    public TypeV setValue(final TypeV val) {
      if( val == null ) throw new NullPointerException();
      // NOTE(review): the update of this Entry's own cached value field
      // (e.g. '_val = val;') is not visible in this extract.
      return put(_key, val);
\r
  // Entry iterator: piggy-backs on SnapshotV and wraps each <key,value>
  // pair found in a freshly-constructed NBHMEntry.
  private class SnapshotE implements Iterator<Map.Entry<TypeK,TypeV>> {
    final SnapshotV _ss;        // Underlying value-snapshot doing the real work
    public SnapshotE() { _ss = new SnapshotV(); }
    public void remove() { _ss.remove(); }
    public Map.Entry<TypeK,TypeV> next() { _ss.next(); return new NBHMEntry((TypeK)_ss._prevK,_ss._prevV); }
    public boolean hasNext() { return _ss.hasNext(); }
\r
1225 /** Returns a {@link Set} view of the mappings contained in this map. The
\r
1226 * set is backed by the map, so changes to the map are reflected in the
\r
1227 * set, and vice-versa. The set supports element removal, which removes
\r
1228 * the corresponding mapping from the map, via the
\r
1229 * <tt>Iterator.remove</tt>, <tt>Set.remove</tt>, <tt>removeAll</tt>,
\r
1230 * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not support
\r
1231 * the <tt>add</tt> or <tt>addAll</tt> operations.
\r
1233 * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
\r
1234 * that will never throw {@link ConcurrentModificationException},
\r
1235 * and guarantees to traverse elements as they existed upon
\r
1236 * construction of the iterator, and may (but is not guaranteed to)
\r
1237 * reflect any modifications subsequent to construction.
\r
1239 * <p><strong>Warning:</strong> the iterator associated with this Set
\r
1240 * requires the creation of {@link java.util.Map.Entry} objects with each
\r
1241 * iteration. The {@link NonBlockingHashMap} does not normally create or
\r
1242 * using {@link java.util.Map.Entry} objects so they will be created soley
\r
1243 * to support this iteration. Iterating using {@link #keySet} or {@link
\r
1244 * #values} will be more efficient.
\r
1247 public Set<Map.Entry<TypeK,TypeV>> entrySet() {
\r
1248 return new AbstractSet<Map.Entry<TypeK,TypeV>>() {
\r
1249 @Override public void clear ( ) { NonBlockingHashMap.this.clear( ); }
\r
1250 @Override public int size ( ) { return NonBlockingHashMap.this.size ( ); }
\r
1251 @Override public boolean remove( final Object o ) {
\r
1252 if( !(o instanceof Map.Entry)) return false;
\r
1253 final Map.Entry<?,?> e = (Map.Entry<?,?>)o;
\r
1254 return NonBlockingHashMap.this.remove(e.getKey(), e.getValue());
\r
1256 @Override public boolean contains(final Object o) {
\r
1257 if( !(o instanceof Map.Entry)) return false;
\r
1258 final Map.Entry<?,?> e = (Map.Entry<?,?>)o;
\r
1259 TypeV v = get(e.getKey());
\r
1260 return v.equals(e.getValue());
\r
1262 @Override public Iterator<Map.Entry<TypeK,TypeV>> iterator() { return new SnapshotE(); }
\r
1266 // --- writeObject -------------------------------------------------------
\r
1267 // Write a NBHM to a stream
\r
1268 private void writeObject(java.io.ObjectOutputStream s) throws IOException {
\r
1269 s.defaultWriteObject(); // Nothing to write
\r
1270 for( Object K : keySet() ) {
\r
1271 final Object V = get(K); // Do an official 'get'
\r
1272 s.writeObject(K); // Write the <TypeK,TypeV> pair
\r
1275 s.writeObject(null); // Sentinel to indicate end-of-data
\r
1276 s.writeObject(null);
\r
1279 // --- readObject --------------------------------------------------------
\r
1280 // Read a CHM from a stream
\r
1281 private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException {
\r
1282 s.defaultReadObject(); // Read nothing
\r
1283 initialize(MIN_SIZE);
\r
1285 final TypeK K = (TypeK) s.readObject();
\r
1286 final TypeV V = (TypeV) s.readObject();
\r
1287 if( K == null ) break;
\r
1288 put(K,V); // Insert with an offical put
\r
1292 } // End NonBlockingHashMap class
\r