--- jsr166/src/jsr166e/StripedAdder.java 2011/07/22 13:25:12 1.3 +++ jsr166/src/jsr166e/StripedAdder.java 2011/07/28 15:05:55 1.8 @@ -5,7 +5,6 @@ */ package jsr166e; -import java.util.Arrays; import java.util.Random; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -27,9 +26,9 @@ import java.io.ObjectOutputStream; * update a common sum that is used for purposes such as collecting * statistics. In this case, performance may be significantly faster * than using a shared {@link AtomicLong}, at the expense of using - * significantly more space. On the other hand, if it is known that - * only one thread can ever update the sum, performance may be - * significantly slower than just updating a local variable. + * much more space. On the other hand, if it is known that only one + * thread can ever update the sum, performance may be significantly + * slower than just updating a local variable. * *
A StripedAdder may optionally be constructed with a given
* expected contention level; i.e., the number of threads that are
@@ -43,112 +42,113 @@ public class StripedAdder implements Ser
private static final long serialVersionUID = 7249069246863182397L;
/*
- * Overview: We maintain a table of Atomic long variables. The
- * table is indexed by per-thread hash codes that are initialized
- * to random values.
+ * A StripedAdder maintains a table of Atomic long variables. The
+ * table is indexed by per-thread hash codes.
*
- * The table doubles in size upon contention (as indicated by
- * failed CASes when performing add()), but is capped at the
- * nearest power of two >= #CPUS. This reflects the idea that,
- * when there are more threads than CPUs, then if each thread were
- * bound to a CPU, there would exist a perfect hash function
- * mapping threads to slots that eliminates collisions. When we
- * reach capacity, we search for this mapping by randomly varying
- * the hash codes of colliding threads. Because search is random,
- * and failures only become known via CAS failures, convergence
- * will be slow, and because threads are typically not bound to
- * CPUS forever, may not occur at all. However, despite these
- * limitations, observed contention is typically very low in these
- * cases.
- *
- * Table entries are of class Adder; a form of AtomicLong padded
- * to reduce cache contention on most processors. Padding is
- * overkill for most Atomics because they are most often
+ * Table entries are of class Adder; a variant of AtomicLong
+ * padded to reduce cache contention on most processors. Padding
+ * is overkill for most Atomics because they are usually
* irregularly scattered in memory and thus don't interfere much
* with each other. But Atomic objects residing in arrays will
* tend to be placed adjacent to each other, and so will most
- * often share cache lines without this precaution. Except for
- * slot adders[0], Adders are constructed upon first use, which
- * further improves per-thread locality and helps reduce (an
- * already large) footprint.
+ * often share cache lines (with a huge negative performance
+ * impact) without this precaution.
+ *
+ * Because Adders are relatively large, we avoid creating them
+ * until they are needed. On the other hand, we try to create them
+ * on any sign of contention.
+ *
+ * Per-thread hash codes are initialized to random values.
+ * Collisions are indicated by failed CASes when performing an add
+ * operation (see method retryAdd). Upon a collision, if the table
+ * size is less than the capacity, it is doubled in size unless
+ * some other thread holds the lock. If a hashed slot is empty, and
+ * the lock is available, a new Adder is created. Otherwise, if the
+ * slot exists, a CAS is tried. Retries proceed by "double
+ * hashing", using a secondary hash (Marsaglia XorShift) to try to
+ * find a free slot.
+ *
+ * By default, the table is lazily initialized. Upon first use,
+ * the table is set to size 2 (the minimum non-empty size), but
+ * containing only a single Adder. The maximum table size is
+ * bounded by the nearest power of two >= the number of CPUs. The
+ * table size is capped because, when there are more threads than
+ * CPUs, supposing that each thread were bound to a CPU, there
+ * would exist a perfect hash function mapping threads to slots
+ * that eliminates collisions. When we reach capacity, we search
+ * for this mapping by randomly varying the hash codes of
+ * colliding threads. Because search is random, and failures only
+ * become known via CAS failures, convergence will be slow, and
+ * because threads are typically not bound to CPUs forever, may
+ * not occur at all. However, despite these limitations, observed
+ * contention is typically low in these cases.
*
* A single spinlock is used for resizing the table as well as
- * populating slots with new Adders. Upon lock contention, threads
- * try other slots rather than blocking. We guarantee that at
- * least one slot (0) exists, so retries will eventually find a
- * candidate Adder. During these retries, there is increased
+ * populating slots with new Adders. After initialization, there
+ * is no need for a blocking lock: Upon lock contention, threads
+ * try other slots rather than blocking. After initialization, at
+ * least one slot exists, so retries will eventually find a
+ * candidate Adder. During these retries, there is increased
* contention and reduced locality, which is still better than
* alternatives.
*/
- /**
- * Number of processors, to place a cap on table growth.
- */
- static final int NCPU = Runtime.getRuntime().availableProcessors();
+ private static final int NCPU = Runtime.getRuntime().availableProcessors();
/**
- * Padded version of AtomicLong
+ * Padded variant of AtomicLong. The value field is placed
+ * between pads, hoping that the JVM doesn't reorder them.
+ * Updates are via inlined CAS in methods add and retryAdd.
*/
- static final class Adder extends AtomicLong {
- long p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pa, pb, pc, pd;
- Adder(long x) { super(x); }
+ static final class Adder {
+ volatile long p0, p1, p2, p3, p4, p5, p6;
+ volatile long value;
+ volatile long q0, q1, q2, q3, q4, q5, q6;
+ Adder(long x) { value = x; }
}
/**
- * Holder for the thread-local hash code. The code starts off with
- * a given random value, but may be set to a different
- * pseudo-random value (using a cheaper but adequate xorshift
- * generator) upon collisions.
+ * Holder for the thread-local hash code. The code is initially
+ * random, but may be set to a different value upon collisions.
*/
static final class HashCode {
+ static final Random rng = new Random();
int code;
- HashCode(int h) { code = h; }
+ HashCode() {
+ int h = rng.nextInt(); // Avoid zero, because of xorShift rehash
+ code = (h == 0) ? 1 : h;
+ }
}
/**
* The corresponding ThreadLocal class
*/
static final class ThreadHashCode extends ThreadLocal