ConcurrentHashMap
ConcurrentHashMap is the thread-safe counterpart of HashMap. Unlike Hashtable, it does not synchronize at the method level; instead it combines spinning, CAS and synchronized, treating each slot of the hash table as a bucket (the unit of synchronization). A structural modification only locks the bucket containing the affected element and leaves every other bucket untouched, which shrinks the lock granularity, reduces contention, and improves concurrent performance.
In a ConcurrentHashMap, neither keys nor values may be null.
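A minimal usage sketch (the class name ChmDemo is only illustrative) showing the null restriction and typical reads and writes:
import java.util.concurrent.ConcurrentHashMap;

public class ChmDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("a", 1);                      // plain insert
        map.putIfAbsent("a", 2);              // no effect: "a" is already present
        System.out.println(map.get("a"));     // 1
        try {
            map.put(null, 1);                 // null keys (and null values) are rejected
        } catch (NullPointerException expected) {
            System.out.println("null key rejected");
        }
    }
}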
Linked-list node inner class
static class Node<K,V> implements Map.Entry<K,V> {
final int hash;
final K key;
volatile V val;
volatile Node<K,V> next;
Node<K,V> find(int h, Object k) {
Node<K,V> e = this;
if (k != null) {
do {
K ek;
if (e.hash == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
} while ((e = e.next) != null);
}
return null;
}
}
Red-black tree node inner class
static final class TreeNode<K,V> extends Node<K,V> {
TreeNode<K,V> parent; // red-black tree links
TreeNode<K,V> left;
TreeNode<K,V> right;
TreeNode<K,V> prev; // needed to unlink next upon deletion
boolean red;
final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
if (k != null) {
TreeNode<K,V> p = this;
do {
int ph, dir; K pk; TreeNode<K,V> q;
TreeNode<K,V> pl = p.left, pr = p.right;
if ((ph = p.hash) > h)
p = pl;
else if (ph < h)
p = pr;
else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
return p;
else if (pl == null)
p = pr;
else if (pr == null)
p = pl;
else if ((kc != null ||
(kc = comparableClassFor(k)) != null) &&
(dir = compareComparables(kc, k, pk)) != 0)
p = (dir < 0) ? pl : pr;
else if ((q = pr.findTreeNode(h, k, kc)) != null)
return q;
else
p = pl;
} while (p != null);
}
return null;
}
}
Key fields
// Maximum table capacity
private static final int MAXIMUM_CAPACITY = 1 << 30;
// Default capacity: 16
private static final int DEFAULT_CAPACITY = 16;
// Largest possible array size (needed by toArray and related methods)
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
// No longer used since JDK 1.8; kept only for compatibility with earlier versions
private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
// Default load factor 0.75; the resize threshold is normally computed as [n - (n >>> 2)]
private static final float LOAD_FACTOR = 0.75f;
// Treeify threshold: a bin whose list reaches 8 nodes is converted to a red-black tree
static final int TREEIFY_THRESHOLD = 8;
// Untreeify threshold: a bin whose tree shrinks to 6 nodes is converted back to a list
static final int UNTREEIFY_THRESHOLD = 6;
// A bin is treeified only if its list reaches 8 AND the table has at least 64 bins;
// otherwise the table is resized instead to reduce hash collisions
static final int MIN_TREEIFY_CAPACITY = 64;
// The hash table
transient volatile Node<K,V>[] table;
// Non-null only while a resize is in progress
private transient volatile Node<K,V>[] nextTable;
// Base value of the element count
private transient volatile long baseCount;
// Table initialization and resize control:
//  -1 means the table is being initialized;
//  during a resize it is negative, with the resize stamp in the high 16 bits and
//  (1 + the number of resizing threads) in the low 16 bits;
//  otherwise a positive value holds the initial capacity (before initialization)
//  or the next resize threshold (after initialization)
private transient volatile int sizeCtl;
// Next bin index (plus one) still to be claimed by resizing threads
private transient volatile int transferIndex;
// Spinlock (acquired via CAS) used when creating or resizing counterCells
private transient volatile int cellsBusy;
// Cells used to spread the element count across threads under contention
private transient volatile CounterCell[] counterCells;
// Unsafe mechanics
private static final sun.misc.Unsafe U;
private static final long SIZECTL;
private static final long TRANSFERINDEX;
private static final long BASECOUNT;
private static final long CELLSBUSY;
private static final long CELLVALUE;
private static final long ABASE;
private static final int ASHIFT;
static {
try {
U = sun.misc.Unsafe.getUnsafe();
Class<?> k = ConcurrentHashMap.class;
SIZECTL = U.objectFieldOffset
(k.getDeclaredField("sizeCtl"));
TRANSFERINDEX = U.objectFieldOffset
(k.getDeclaredField("transferIndex"));
BASECOUNT = U.objectFieldOffset
(k.getDeclaredField("baseCount"));
CELLSBUSY = U.objectFieldOffset
(k.getDeclaredField("cellsBusy"));
Class<?> ck = CounterCell.class;
CELLVALUE = U.objectFieldOffset
(ck.getDeclaredField("value"));
Class<?> ak = Node[].class;
ABASE = U.arrayBaseOffset(ak);
int scale = U.arrayIndexScale(ak);
if ((scale & (scale - 1)) != 0)
throw new Error("data type scale not a power of two");
ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
} catch (Exception e) {
throw new Error(e);
}
}
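The ABASE/ASHIFT values computed above are used by the volatile table accessors that the rest of the listings rely on (tabAt, casTabAt, setTabAt). They are not shown elsewhere in this article, so for reference they are essentially the following (lightly abridged from the JDK 8 source):
// Volatile read of the bin head at index i
static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
    return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
}
// Atomically replace the bin head at index i: expect c, set v
static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                    Node<K,V> c, Node<K,V> v) {
    return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
}
// Volatile write of the bin head at index i
static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
    U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
}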
Constructors
public ConcurrentHashMap() {
}
public ConcurrentHashMap(int initialCapacity) {
if (initialCapacity < 0)
throw new IllegalArgumentException();
int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
MAXIMUM_CAPACITY :
tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
this.sizeCtl = cap;
}
public ConcurrentHashMap(int initialCapacity, float loadFactor) {
this(initialCapacity, loadFactor, 1);
}
public ConcurrentHashMap(int initialCapacity,
float loadFactor, int concurrencyLevel) {
if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
throw new IllegalArgumentException();
if (initialCapacity < concurrencyLevel) // Use at least as many bins
initialCapacity = concurrencyLevel; // as estimated threads
long size = (long)(1.0 + (long)initialCapacity / loadFactor);
int cap = (size >= (long)MAXIMUM_CAPACITY) ?
MAXIMUM_CAPACITY : tableSizeFor((int)size);
this.sizeCtl = cap;
}
Rounding the capacity up to a power of two
private static final int tableSizeFor(int c) {
int n = c - 1;
n |= n >>> 1;
n |= n >>> 2;
n |= n >>> 4;
n |= n >>> 8;
n |= n >>> 16;
return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
}
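A worked example of the sizing math (the class name CapacitySketch and the copied helper are only illustrative): tableSizeFor returns the smallest power of two greater than or equal to c, and the one-argument constructor feeds it initialCapacity * 1.5 + 1, so new ConcurrentHashMap<>(16) ends up with a 32-slot table rather than a 16-slot one.
public class CapacitySketch {
    private static final int MAXIMUM_CAPACITY = 1 << 30;

    // Same rounding logic as ConcurrentHashMap.tableSizeFor, copied here for the demo
    static int tableSizeFor(int c) {
        int n = c - 1;
        n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    public static void main(String[] args) {
        System.out.println(tableSizeFor(10));   // 16
        System.out.println(tableSizeFor(16));   // 16 (already a power of two)
        int initialCapacity = 16;
        // Same formula as the one-argument constructor above
        System.out.println(tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); // 32
    }
}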
Adding an element
/**
 * If the key already exists and onlyIfAbsent is false: replace oldVal with newVal and return oldVal.
 * If the key already exists and onlyIfAbsent is true: return oldVal without replacing it.
 * If the key does not exist: insert the new mapping and return null.
 */
public V put(K key, V value) {
return putVal(key, value, false);
}
final V putVal(K key, V value, boolean onlyIfAbsent) {
// Neither key nor value may be null
if (key == null || value == null) throw new NullPointerException();
// Compute the spread hash
int hash = spread(key.hashCode());
// Number of nodes in the bin
int binCount = 0;
// Spin + CAS, plus synchronized on the bin head
for (Node<K,V>[] tab = table;;) {
Node<K,V> f; int n, i, fh;
// The table has not been created yet: initialize it
if (tab == null || (n = tab.length) == 0)
    // Lazily initialize the hash table
    tab = initTable();
// The bin is still empty: CAS the new node in as its first element
else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
if (casTabAt(tab, i, null,
new Node<K,V>(hash, key, value, null)))
break; // no lock when adding to empty bin
}
// The bin is being migrated: the current thread helps with the resize
else if ((fh = f.hash) == MOVED)
tab = helpTransfer(tab, f);
// The table is initialized and the bin is non-empty: add the element to the bin
else {
    V oldVal = null;
    // Lock only this bin. Compared with Hashtable, which locks the whole object,
    // the lock granularity is much smaller and contention between threads is reduced;
    // compared with HashMap, the synchronization trades a little performance for thread safety
synchronized (f) {
// Re-check that the bin's first node has not changed
if (tabAt(tab, i) == f) {
    // A non-negative hash means an ordinary linked-list bin (no migration, not a tree)
    if (fh >= 0) {
        binCount = 1;
        // If an equal key already exists, update its value (unless onlyIfAbsent is true);
        // otherwise append a new node to the end of the list
for (Node<K,V> e = f;; ++binCount) {
K ek;
// An equal key was found
if (e.hash == hash &&
((ek = e.key) == key ||
(ek != null && key.equals(ek)))) {
oldVal = e.val;
// Replace oldVal with newVal
if (!onlyIfAbsent)
e.val = value;
break;
}
Node<K,V> pred = e;
// Reached the end of the list: create a new node and append it
if ((e = e.next) == null) {
pred.next = new Node<K,V>(hash, key,
value, null);
break;
}
}
}
// Red-black tree bin
else if (f instanceof TreeBin) {
Node<K,V> p;
// For a tree bin, binCount is fixed at 2: it simply marks that the bin is already treeified
binCount = 2;
if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
value)) != null) {
oldVal = p.val;
if (!onlyIfAbsent)
p.val = value;
}
}
}
}
// binCount != 0 means the element was handled inside the bin
if (binCount != 0) {
    // If binCount reached 8 (TREEIFY_THRESHOLD), treeifyBin converts the bin to a tree,
    // but only once the table capacity has reached 64; otherwise it resizes instead.
    // A bin that is already a tree always reports binCount = 2 (< 8), so it is never re-treeified.
    if (binCount >= TREEIFY_THRESHOLD)
        // Treeify the bin (or resize if the table is still small)
        treeifyBin(tab, i);
    if (oldVal != null)
        // The key existed: return the old value
return oldVal;
break;
}
}
}
// Update the element count (and possibly trigger a resize)
addCount(1L, binCount);
// The key was newly inserted: return null
return null;
}
/**
 * Compute the spread hash of a key
 */
static final int spread(int h) {
return (h ^ (h >>> 16)) & HASH_BITS;
}
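A small sketch of what spread achieves, assuming HASH_BITS = 0x7fffffff as in the JDK source (the class name is illustrative): XOR-ing the high 16 bits into the low 16 bits lets keys that differ only in their high bits fall into different bins, and the final mask clears the sign bit so user hashes never collide with the reserved negative hashes (MOVED, TREEBIN, ...).
public class SpreadSketch {
    static final int HASH_BITS = 0x7fffffff; // same constant as in ConcurrentHashMap

    static int spread(int h) {
        return (h ^ (h >>> 16)) & HASH_BITS;
    }

    public static void main(String[] args) {
        int h1 = 0x00010000;       // differs from h2 only in the high bits
        int h2 = 0x00020000;
        int n = 16;                // table length
        // Without spreading, both keys would land in bin 0
        System.out.println((h1 & (n - 1)) + " " + (h2 & (n - 1)));                      // 0 0
        // After spreading they land in different bins
        System.out.println((spread(h1) & (n - 1)) + " " + (spread(h2) & (n - 1)));      // 1 2
    }
}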
/**
 * Initialize the hash table
 */
private final Node<K,V>[] initTable() {
Node<K,V>[] tab; int sc;
// Spin until the table is initialized
while ((tab = table) == null || tab.length == 0) {
    // sizeCtl < 0 means another thread is initializing or resizing, so give up the CPU
    if ((sc = sizeCtl) < 0)
        Thread.yield(); // lost initialization race; just spin
    // CAS sizeCtl to -1 to claim the initialization
else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
try {
if ((tab = table) == null || tab.length == 0) {
// Use sizeCtl as the capacity if one was supplied, otherwise the default capacity
int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
// Create the table
@SuppressWarnings("unchecked")
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
table = tab = nt;
// Compute the resize threshold: n - n/4 = 0.75n
sc = n - (n >>> 2);
}
} finally {
// Only one thread can win the CAS above, so a plain write is sufficient here:
// set sizeCtl to the resize threshold
sizeCtl = sc;
}
break;
}
}
return tab;
}
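A short usage sketch of the return-value contract documented above putVal (the class name is illustrative):
import java.util.concurrent.ConcurrentHashMap;

public class PutSemanticsSketch {
    public static void main(String[] args) {
        ConcurrentHashMap<String, String> map = new ConcurrentHashMap<>();
        System.out.println(map.put("k", "v1"));         // null: key was absent
        System.out.println(map.put("k", "v2"));         // v1: old value returned, value replaced
        System.out.println(map.putIfAbsent("k", "v3")); // v2: old value returned, value NOT replaced
        System.out.println(map.get("k"));               // v2
    }
}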
Maintaining the element count
The logic is similar to LongAdder.
private final void addCount(long x, int check) {
CounterCell[] as; long b, s;
// counterCells != null means contention has already occurred: threads are spread across
// different cells to reduce contention on a single shared variable.
// counterCells == null means there has been no contention yet: just CAS-update baseCount.
if ((as = counterCells) != null || // contention has already occurred
!U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
CounterCell a; long v; int m;
// uncontended = true means no contention has been observed yet
boolean uncontended = true;
if (as == null || (m = as.length - 1) < 0 || // the cell array is not initialized yet
    (a = as[ThreadLocalRandom.getProbe() & m]) == null || // no cell at this thread's slot
    !(uncontended =
      U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) { // CAS-update the cell
    // The cell update failed: fullAddCount spins, CAS-updates, and initializes
    // or grows the cell array as needed
fullAddCount(x, uncontended);
return;
}
if (check <= 1)
return;
// Compute the total element count
s = sumCount();
}
if (check >= 0) {
Node<K,V>[] tab, nt; int n, sc;
while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
(n = tab.length) < MAXIMUM_CAPACITY) {
// rs is the resize stamp for the current table length
int rs = resizeStamp(n);
// sc < 0: a resize is in progress
if (sc < 0) {
if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
transferIndex <= 0)
// The resize is finished or cannot be joined: exit the loop
break;
// Otherwise join the transfer: CAS the resizer count up by one and help migrate elements
if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
transfer(tab, nt);
}
else if (U.compareAndSwapInt(this, SIZECTL, sc,
(rs << RESIZE_STAMP_SHIFT) + 2))
// The thread that triggers the resize arrives here:
// the high 16 bits of sizeCtl store the resize stamp rs, the low 16 bits store (1 + nThreads).
// Start migrating elements.
transfer(tab, null);
// Recompute the total element count
s = sumCount();
}
}
}
private final void fullAddCount(long x, boolean wasUncontended) {
int h;
if ((h = ThreadLocalRandom.getProbe()) == 0) {
ThreadLocalRandom.localInit(); // force initialization
h = ThreadLocalRandom.getProbe();
wasUncontended = true;
}
// false means the previous CAS did not collide with another thread
boolean collide = false; // True if last slot nonempty
// Spin, CAS-updating a cell (or baseCount) until the add succeeds
for (;;) {
CounterCell[] as; CounterCell a; int n; long v;
// The cell array is already initialized (contention has occurred before)
if ((as = counterCells) != null && (n = as.length) > 0) {
// The cell this thread hashes to is still null
if ((a = as[(n - 1) & h]) == null) {
// No other thread is currently modifying the cell array
if (cellsBusy == 0) { // Try to attach new Cell
    // Optimistically create the cell first
    CounterCell r = new CounterCell(x); // Optimistic create
    // Lock the cell array by CAS-ing cellsBusy from 0 to 1 before publishing the new cell
if (cellsBusy == 0 &&
U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
boolean created = false;
try { // Recheck under lock
CounterCell[] rs; int m, j;
if ((rs = counterCells) != null &&
(m = rs.length) > 0 &&
rs[j = (m - 1) & h] == null) {
// Publish the newly created cell
rs[j] = r;
created = true;
}
} finally {
cellsBusy = 0;
}
if (created)
break;
continue; // Slot is now non-empty
}
}
collide = false;
}
else if (!wasUncontended) // CAS already known to fail
wasUncontended = true; // Continue after rehash
else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
break;
else if (counterCells != as || n >= NCPU)
collide = false; // At max size or stale
else if (!collide)
collide = true;
else if (cellsBusy == 0 &&
U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
try {
// Double the counterCells array
if (counterCells == as) {// Expand table unless stale
CounterCell[] rs = new CounterCell[n << 1];
for (int i = 0; i < n; ++i)
rs[i] = as[i];
counterCells = rs;
}
} finally {
cellsBusy = 0;
}
collide = false;
continue; // Retry with expanded table
}
h = ThreadLocalRandom.advanceProbe(h);
}
// Initialize counterCells
else if (cellsBusy == 0 && counterCells == as &&
U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
boolean init = false;
try { // Initialize table
if (counterCells == as) {
CounterCell[] rs = new CounterCell[2];
rs[h & 1] = new CounterCell(x);
counterCells = rs;
init = true;
}
} finally {
cellsBusy = 0;
}
if (init)
break;
}
// Fall back to CAS-updating baseCount
else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
break; // Fall back on using base
}
}
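The baseCount/counterCells pair above is the same striped-counter idea as LongAdder: uncontended threads CAS a shared base, contended threads are spread across cells, and the total is the sum of everything. A deliberately simplified sketch of that idea, not the real implementation (it uses AtomicLong cells and a fixed stripe count instead of the CAS/cellsBusy machinery):
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;

public class StripedCounterSketch {
    private final AtomicLong base = new AtomicLong();        // plays the role of baseCount
    private final AtomicLong[] cells = new AtomicLong[8];    // plays the role of counterCells

    public void add(long x) {
        // Fast path: try the shared base; fall back to a per-thread cell under contention
        long b = base.get();
        if (base.compareAndSet(b, b + x))
            return;
        int idx = ThreadLocalRandom.current().nextInt(cells.length);
        AtomicLong cell;
        synchronized (cells) {                               // crude stand-in for the cellsBusy spinlock
            if (cells[idx] == null)
                cells[idx] = new AtomicLong();
            cell = cells[idx];
        }
        cell.addAndGet(x);
    }

    public long sum() {
        // Analogous to sumCount(): base plus all cells; like sumCount, only an
        // estimate while other threads are still updating
        long s = base.get();
        for (AtomicLong c : cells)
            if (c != null)
                s += c.get();
        return s;
    }
}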
Helping with a resize
If a thread that is adding an element finds a resize in progress and the bin it hit has already been migrated, it helps migrate the remaining bins.
final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
Node<K,V>[] nextTab; int sc;
// 1. The table is not null.
// 2. The first node of the bin is a ForwardingNode and its nextTable is not null,
//    which means this bin has already been migrated, so help migrate the other bins.
//    During a resize, a migrated bin's head is replaced by a ForwardingNode whose
//    nextTable points to the new table.
if (tab != null && (f instanceof ForwardingNode) &&
(nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
int rs = resizeStamp(tab.length);
// sizeCtl < 0 means the resize is still in progress
while (nextTab == nextTable && table == tab &&
(sc = sizeCtl) < 0) {
if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
sc == rs + MAX_RESIZERS || transferIndex <= 0)
break;
// Increment the number of resizing threads
if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
    // Join the transfer and help the other threads migrate elements
transfer(tab, nextTab);
break;
}
}
return nextTab;
}
return table;
}
/**
 * Moves the elements into a new table; when the resize completes, the capacity has doubled
 */
private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
int n = tab.length, stride;
// stride: the number of bins each resizing thread claims per round, at least MIN_TRANSFER_STRIDE
if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
stride = MIN_TRANSFER_STRIDE; // subdivide range
// nextTab == null means the transfer has not started yet: create the new table
if (nextTab == null) { // initiating
try {
// n << 1: double the capacity
@SuppressWarnings("unchecked")
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
nextTab = nt;
} catch (Throwable ex) { // try to cope with OOME
sizeCtl = Integer.MAX_VALUE;
return;
}
// Publish the new table
nextTable = nextTab;
// The transfer claims ranges of bins starting from the end of the old table
transferIndex = n;
}
int nextn = nextTab.length;
ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
boolean advance = true;
boolean finishing = false; // to ensure sweep before committing nextTab
for (int i = 0, bound = 0;;) {
Node<K,V> f; int fh;
// Claim a range of bins and compute i; i walks from the high end of the old table (n - 1) down towards 0
while (advance) {
int nextIndex, nextBound;
if (--i >= bound || finishing)
advance = false;
else if ((nextIndex = transferIndex) <= 0) {
i = -1;
advance = false;
}
else if (U.compareAndSwapInt
(this, TRANSFERINDEX, nextIndex,
nextBound = (nextIndex > stride ?
nextIndex - stride : 0))) {
bound = nextBound;
i = nextIndex - 1;
advance = false;
}
}
// i is out of range: this thread's share of the transfer is done
if (i < 0 || i >= n || i + n >= nextn) {
int sc;
// finishing == true: the whole resize has completed
if (finishing) {
nextTable = null;
// Replace the old table with the new one
table = nextTab;
// New resize threshold: (2n) - (n/2) = 1.5n, i.e. 0.75 of the new capacity
sizeCtl = (n << 1) - (n >>> 1);
return;
}
if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
return;
finishing = advance = true;
i = n; // recheck before commit
}
}
else if ((f = tabAt(tab, i)) == null)
advance = casTabAt(tab, i, null, fwd);
else if ((fh = f.hash) == MOVED)
advance = true; // already processed
// Migrate this bin's elements to the new table
else {
    // Lock the head node of the (old) bin
synchronized (f) {
// Re-check that the bin's first node has not changed
if (tabAt(tab, i) == f) {
Node<K,V> ln, hn;
// Move the elements into the new table:
// nodes whose (hash & n) == 0 (n = old capacity) stay at index i,
// the others move to index i + n; the elements of one old bin may therefore
// split into two lists or two trees.
// Linked-list bin
if (fh >= 0) {
int runBit = fh & n;
Node<K,V> lastRun = f;
for (Node<K,V> p = f.next; p != null; p = p.next) {
int b = p.hash & n;
if (b != runBit) {
runBit = b;
lastRun = p;
}
}
if (runBit == 0) {
ln = lastRun;
hn = null;
}
else {
hn = lastRun;
ln = null;
}
for (Node<K,V> p = f; p != lastRun; p = p.next) {
int ph = p.hash; K pk = p.key; V pv = p.val;
if ((ph & n) == 0)
ln = new Node<K,V>(ph, pk, pv, ln);
else
hn = new Node<K,V>(ph, pk, pv, hn);
}
// Nodes with (hash & n) == 0 go into bin i of the new table
setTabAt(nextTab, i, ln);
// Nodes with (hash & n) != 0 go into bin i + n of the new table
setTabAt(nextTab, i + n, hn);
// Install the ForwardingNode to mark the old bin as migrated
setTabAt(tab, i, fwd);
advance = true;
}
// Red-black tree bin
else if (f instanceof TreeBin) {
TreeBin<K,V> t = (TreeBin<K,V>)f;
TreeNode<K,V> lo = null, loTail = null;
TreeNode<K,V> hi = null, hiTail = null;
int lc = 0, hc = 0;
for (Node<K,V> e = t.first; e != null; e = e.next) {
int h = e.hash;
TreeNode<K,V> p = new TreeNode<K,V>
(h, e.key, e.val, null, null);
if ((h & n) == 0) {
if ((p.prev = loTail) == null)
lo = p;
else
loTail.next = p;
loTail = p;
++lc;
}
else {
if ((p.prev = hiTail) == null)
hi = p;
else
hiTail.next = p;
hiTail = p;
++hc;
}
}
ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
(hc != 0) ? new TreeBin<K,V>(lo) : t;
hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
(lc != 0) ? new TreeBin<K,V>(hi) : t;
// Nodes with (hash & n) == 0 go into bin i of the new table
setTabAt(nextTab, i, ln);
// Nodes with (hash & n) != 0 go into bin i + n of the new table
setTabAt(nextTab, i + n, hn);
// Install the ForwardingNode to mark the old bin as migrated
setTabAt(tab, i, fwd);
advance = true;
}
}
}
}
}
}
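A worked example of the split rule used during the transfer (the class name is illustrative): with an old capacity of n = 16, a node's destination depends only on the bit tested by hash & n, so hashes 5, 21, 37 and 53, which all share old bin 5, are split between new bins 5 and 21.
public class TransferSplitSketch {
    public static void main(String[] args) {
        int n = 16;                          // old capacity
        int[] hashes = {5, 21, 37, 53};      // all map to old bin 5 (hash & (n - 1) == 5)
        for (int h : hashes) {
            int oldBin = h & (n - 1);
            int newBin = ((h & n) == 0) ? oldBin : oldBin + n;
            System.out.println("hash " + h + ": old bin " + oldBin + " -> new bin " + newBin);
        }
    }
}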
Getting an element
public V get(Object key) {
Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
// Compute the key's spread hash
int h = spread(key.hashCode());
// The table is non-empty and the key's bin has at least one node;
// tabAt(tab, (n - 1) & h) is a volatile read of the first node of the key's bin
if ((tab = table) != null && (n = tab.length) > 0 &&
(e = tabAt(tab, (n - 1) & h)) != null) {
// If the first node already matches, return its value directly
if ((eh = e.hash) == h) {
if ((ek = e.key) == key || (ek != null && key.equals(ek)))
return e.val;
}
// A negative hash means the head is a special node (a TreeBin for a treeified bin,
// or a ForwardingNode during a resize): delegate the lookup to its find method
else if (eh < 0)
return (p = e.find(h, key)) != null ? p.val : null;
// Otherwise the bin is a plain linked list: traverse it
while ((e = e.next) != null) {
if (e.hash == h &&
((ek = e.key) == key || (ek != null && key.equals(ek))))
return e.val;
}
}
// The table is empty, the bin is empty, or the key is absent: return null
return null;
}
The contains family
// Is the given key present?
public boolean containsKey(Object key) {
return get(key) != null;
}
// Is the given value present? (may require traversing the whole table)
public boolean containsValue(Object value) {
if (value == null)
throw new NullPointerException();
Node<K,V>[] t;
if ((t = table) != null) {
Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
for (Node<K,V> p; (p = it.advance()) != null; ) {
V v;
if ((v = p.val) == value || (v != null && value.equals(v)))
return true;
}
}
return false;
}
public boolean contains(Object value) {
return containsValue(value);
}
Removing an element
public V remove(Object key) {
return replaceNode(key, null, null);
}
final V replaceNode(Object key, V value, Object cv) {
// Compute the key's spread hash
int hash = spread(key.hashCode());
for (Node<K,V>[] tab = table;;) {
Node<K,V> f; int n, i, fh;
// If the table is empty or the key's bin has no node, exit the loop
if (tab == null || (n = tab.length) == 0 ||
    (f = tabAt(tab, i = (n - 1) & hash)) == null)
    break;
// The bin is being migrated: help transfer elements first
else if ((fh = f.hash) == MOVED)
tab = helpTransfer(tab, f);
else {
V oldVal = null;
boolean validated = false;
// Lock the bin containing the element
synchronized (f) {
    // Re-check that the bin's head node has not changed
    if (tabAt(tab, i) == f) {
        // The bin is a linked list: traverse it and replace the value;
        // a null value means the mapping should be removed
if (fh >= 0) {
validated = true;
for (Node<K,V> e = f, pred = null;;) {
K ek;
if (e.hash == hash &&
((ek = e.key) == key ||
(ek != null && key.equals(ek)))) {
V ev = e.val;
if (cv == null || cv == ev ||
(ev != null && cv.equals(ev))) {
oldVal = ev;
// Replace the node's value
if (value != null)
e.val = value;
// Unlink the node from the list
else if (pred != null)
pred.next = e.next;
else
setTabAt(tab, i, e.next);
}
break;
}
pred = e;
if ((e = e.next) == null)
break;
}
}
// The bin is a red-black tree
else if (f instanceof TreeBin) {
validated = true;
TreeBin<K,V> t = (TreeBin<K,V>)f;
TreeNode<K,V> r, p;
if ((r = t.root) != null &&
(p = r.findTreeNode(hash, key, null)) != null) {
V pv = p.val;
if (cv == null || cv == pv ||
(pv != null && cv.equals(pv))) {
oldVal = pv;
// Replace the node's value
if (value != null)
p.val = value;
// Remove the tree node; if the tree becomes too small, untreeify the bin
else if (t.removeTreeNode(p))
setTabAt(tab, i, untreeify(t.first));
}
}
}
}
}
if (validated) {
if (oldVal != null) {
if (value == null)
addCount(-1L, -1);
return oldVal;
}
break;
}
}
}
return null;
}
public boolean remove(Object key, Object value) {
if (key == null)
throw new NullPointerException();
return value != null && replaceNode(key, null, value) != null;
}
public boolean replace(K key, V oldValue, V newValue) {
if (key == null || oldValue == null || newValue == null)
throw new NullPointerException();
return replaceNode(key, newValue, oldValue) != null;
}
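A short usage sketch of the conditional variants above, which only act when the current value equals the expected one (the class name is illustrative):
import java.util.concurrent.ConcurrentHashMap;

public class RemoveReplaceSketch {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("k", 1);
        System.out.println(map.remove("k", 2));      // false: current value is 1, not 2
        System.out.println(map.replace("k", 1, 3));  // true: 1 matched, now mapped to 3
        System.out.println(map.remove("k", 3));      // true: removed
        System.out.println(map.containsKey("k"));    // false
    }
}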
Size
public int size() {
long n = sumCount();
return ((n < 0L) ? 0 :
(n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
(int)n);
}
// Total = baseCount + the sum of all CounterCell values
final long sumCount() {
CounterCell[] as = counterCells; CounterCell a;
long sum = baseCount;
if (as != null) {
for (int i = 0; i < as.length; ++i) {
if ((a = as[i]) != null)
sum += a.value;
}
}
return sum;
}
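Because sumCount simply adds baseCount and the cell values without any locking, size() is only an estimate while other threads are writing. For maps that may hold more than Integer.MAX_VALUE mappings, the JDK recommends mappingCount(), which returns a long. A quick sketch (the class name is illustrative):
import java.util.concurrent.ConcurrentHashMap;

public class SizeSketch {
    public static void main(String[] args) {
        ConcurrentHashMap<Integer, Integer> map = new ConcurrentHashMap<>();
        for (int i = 0; i < 1000; i++) map.put(i, i);
        System.out.println(map.size());         // 1000 (an estimate under concurrent updates)
        System.out.println(map.mappingCount()); // 1000, returned as a long
    }
}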
Other methods
// If the key is absent, apply mappingFunction to the key and insert the result as the value;
// if the computed value is null, no mapping is recorded
computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction)
// If the key is present, apply remappingFunction to the key and the old value and store the result
// as the new value; if the result is null, the mapping is removed
computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction)
// A combination of computeIfAbsent and computeIfPresent:
// if the key is present, remappingFunction is applied to (key, oldVal); a null result removes the mapping;
// if the key is absent, remappingFunction is applied to (key, null); a non-null result is inserted
compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction)
// If the key is absent, insert key-value directly;
// if the key is present, apply remappingFunction to the old value and the given value; a null result
// removes the mapping, otherwise the mapping is updated to the computed value
merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction)
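A usage sketch of this family (the class name is illustrative), using merge for the classic word-count pattern and showing how a null result removes a mapping:
import java.util.concurrent.ConcurrentHashMap;

public class ComputeSketch {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> counts = new ConcurrentHashMap<>();
        for (String w : new String[] {"a", "b", "a"}) {
            // insert 1 for a new key, otherwise add 1 to the existing count
            counts.merge(w, 1, Integer::sum);
        }
        System.out.println(counts);                               // {a=2, b=1} (iteration order not guaranteed)
        counts.computeIfAbsent("c", k -> 0);                      // inserts c=0
        counts.computeIfPresent("b", (k, v) -> v > 0 ? null : v); // null result removes "b"
        System.out.println(counts.containsKey("b"));              // false
    }
}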