Clover coverage report -
Coverage timestamp: So Nov 6 2005 14:19:51 CET
file stats: LOC: 2.084   Methods: 89
NCLOC: 922   Classes: 5
 
 Source file Conditionals Statements Methods TOTAL
AbstractConcurrentReadCache.java 60,2% 63,1% 52,8% 61,2%
coverage coverage
 1    /*
 2    * Copyright (c) 2002-2003 by OpenSymphony
 3    * All rights reserved.
 4    */
 5    /*
 6    File: AbstractConcurrentReadCache
 7   
 8    Written by Doug Lea. Adapted from JDK1.2 HashMap.java and Hashtable.java
 9    which carries the following copyright:
 10   
 11    * Copyright 1997 by Sun Microsystems, Inc.,
 12    * 901 San Antonio Road, Palo Alto, California, 94303, U.S.A.
 13    * All rights reserved.
 14    *
 15    * This software is the confidential and proprietary information
 16    * of Sun Microsystems, Inc. ("Confidential Information"). You
 17    * shall not disclose such Confidential Information and shall use
 18    * it only in accordance with the terms of the license agreement
 19    * you entered into with Sun.
 20   
 21    This class is a modified version of ConcurrentReaderHashMap, which was written
 22    by Doug Lea (http://gee.cs.oswego.edu/dl/). The modifications were done
 23    by Pyxis Technologies. This is a base class for the OSCache module of the
 24    openSymphony project (www.opensymphony.com).
 25   
 26    History:
 27    Date Who What
 28    28oct1999 dl Created
 29    14dec1999 dl jmm snapshot
 30    19apr2000 dl use barrierLock
 31    12jan2001 dl public release
 32    Oct2001 abergevin@pyxis-tech.com
 33    Integrated persistence and outer algorithm support
 34    */
 35    package com.opensymphony.oscache.base.algorithm;
 36   
 37   
 38    /** OpenSymphony BEGIN */
 39    import com.opensymphony.oscache.base.CacheEntry;
 40    import com.opensymphony.oscache.base.persistence.CachePersistenceException;
 41    import com.opensymphony.oscache.base.persistence.PersistenceListener;
 42   
 43    import org.apache.commons.logging.Log;
 44    import org.apache.commons.logging.LogFactory;
 45   
 46    import java.io.IOException;
 47    import java.io.Serializable;
 48   
 49    import java.util.*;
 50   
 51    /**
 52    * A version of Hashtable that supports mostly-concurrent reading, but exclusive writing.
 53    * Because reads are not limited to periods
 54    * without writes, a concurrent reader policy is weaker than a classic
 55    * reader/writer policy, but is generally faster and allows more
 56    * concurrency. This class is a good choice especially for tables that
 57    * are mainly created by one thread during the start-up phase of a
 58    * program, and from then on, are mainly read (with perhaps occasional
 59    * additions or removals) in many threads. If you also need concurrency
 60    * among writes, consider instead using ConcurrentHashMap.
 61    * <p>
 62    *
 63    * Successful retrievals using get(key) and containsKey(key) usually
 64    * run without locking. Unsuccessful ones (i.e., when the key is not
 65    * present) do involve brief synchronization (locking). Also, the
 66    * size and isEmpty methods are always synchronized.
 67    *
 68    * <p> Because retrieval operations can ordinarily overlap with
 69    * writing operations (i.e., put, remove, and their derivatives),
 70    * retrievals can only be guaranteed to return the results of the most
 71    * recently <em>completed</em> operations holding upon their
 72    * onset. Retrieval operations may or may not return results
 73    * reflecting in-progress writing operations. However, the retrieval
 74    * operations do always return consistent results -- either those
 75    * holding before any single modification or after it, but never a
 76    * nonsense result. For aggregate operations such as putAll and
 77    * clear, concurrent reads may reflect insertion or removal of only
 78    * some entries. In those rare contexts in which you use a hash table
 79    * to synchronize operations across threads (for example, to prevent
 80    * reads until after clears), you should either encase operations
 81    * in synchronized blocks, or instead use java.util.Hashtable.
 82    *
 83    * <p>
 84    *
 85    * This class also supports optional guaranteed
 86    * exclusive reads, simply by surrounding a call within a synchronized
 87    * block, as in <br>
 88    * <code>AbstractConcurrentReadCache t; ... Object v; <br>
 89    * synchronized(t) { v = t.get(k); } </code> <br>
 90    *
 91    * But this is not usually necessary in practice. For
 92    * example, it is generally inefficient to write:
 93    *
 94    * <pre>
 95    * AbstractConcurrentReadCache t; ... // Inefficient version
 96    * Object key; ...
 97    * Object value; ...
 98    * synchronized(t) {
 99    * if (!t.containsKey(key))
 100    * t.put(key, value);
 101    * // other code if not previously present
 102    * }
 103    * else {
 104    * // other code if it was previously present
 105    * }
 106    * }
 107    *</pre>
 108    * Instead, just take advantage of the fact that put returns
 109    * null if the key was not previously present:
 110    * <pre>
 111    * AbstractConcurrentReadCache t; ... // Use this instead
 112    * Object key; ...
 113    * Object value; ...
 114    * Object oldValue = t.put(key, value);
 115    * if (oldValue == null) {
 116    * // other code if not previously present
 117    * }
 118    * else {
 119    * // other code if it was previously present
 120    * }
 121    *</pre>
 122    * <p>
 123    *
 124    * Iterators and Enumerations (i.e., those returned by
 125    * keySet().iterator(), entrySet().iterator(), values().iterator(),
 126    * keys(), and elements()) return elements reflecting the state of the
 127    * hash table at some point at or since the creation of the
 128    * iterator/enumeration. They will return at most one instance of
 129    * each element (via next()/nextElement()), but might or might not
 130    * reflect puts and removes that have been processed since they were
 131    * created. They do <em>not</em> throw ConcurrentModificationException.
 132    * However, these iterators are designed to be used by only one
 133    * thread at a time. Sharing an iterator across multiple threads may
 134    * lead to unpredictable results if the table is being concurrently
 135    * modified. Again, you can ensure interference-free iteration by
 136    * enclosing the iteration in a synchronized block. <p>
 137    *
 138    * This class may be used as a direct replacement for any use of
 139    * java.util.Hashtable that does not depend on readers being blocked
 140    * during updates. Like Hashtable but unlike java.util.HashMap,
 141    * this class does NOT allow <tt>null</tt> to be used as a key or
 142    * value. This class is also typically faster than ConcurrentHashMap
 143    * when there is usually only one thread updating the table, but
 144    * possibly many retrieving values from it.
 145    * <p>
 146    *
 147    * Implementation note: A slightly faster implementation of
 148    * this class will be possible once planned Java Memory Model
 149    * revisions are in place.
 150    *
 151    * <p>[<a href="http://gee.cs.oswego.edu/dl/classes/EDU/oswego/cs/dl/util/concurrent/intro.html"> Introduction to this package. </a>]
 152    **/
 153    public abstract class AbstractConcurrentReadCache extends AbstractMap implements Map, Cloneable, Serializable {
 154    /**
 155    * The default initial number of table slots for this table (32).
 156    * Used when not otherwise specified in constructor.
 157    **/
 158    public static int DEFAULT_INITIAL_CAPACITY = 32;
 159   
 160    /**
 161    * The minimum capacity.
 162    * Used if a lower value is implicitly specified
 163    * by either of the constructors with arguments.
 164    * MUST be a power of two.
 165    */
 166    private static final int MINIMUM_CAPACITY = 4;
 167   
 168    /**
 169    * The maximum capacity.
 170    * Used if a higher value is implicitly specified
 171    * by either of the constructors with arguments.
 172    * MUST be a power of two <= 1<<30.
 173    */
 174    private static final int MAXIMUM_CAPACITY = 1 << 30;
 175   
 176    /**
 177    * The default load factor for this table.
 178    * Used when not otherwise specified in constructor, the default is 0.75f.
 179    **/
 180    public static final float DEFAULT_LOAD_FACTOR = 0.75f;
 181   
 182    //OpenSymphony BEGIN (pretty long!)
 183    protected static final String NULL = "_nul!~";
 184    protected static Log log = LogFactory.getLog(AbstractConcurrentReadCache.class);
 185   
 186    /*
 187    The basic strategy is an optimistic-style scheme based on
 188    the guarantee that the hash table and its lists are always
 189    kept in a consistent enough state to be read without locking:
 190   
 191    * Read operations first proceed without locking, by traversing the
 192    apparently correct list of the apparently correct bin. If an
 193    entry is found, but not invalidated (value field null), it is
 194    returned. If not found, operations must recheck (after a memory
 195    barrier) to make sure they are using both the right list and
 196    the right table (which can change under resizes). If
 197    invalidated, reads must acquire main update lock to wait out
 198    the update, and then re-traverse.
 199   
 200    * All list additions are at the front of each bin, making it easy
 201    to check changes, and also fast to traverse. Entry next
 202    pointers are never assigned. Remove() builds new nodes when
 203    necessary to preserve this.
 204   
 205    * Remove() (also clear()) invalidates removed nodes to alert read
 206    operations that they must wait out the full modifications.
 207   
 208    */
 209   
 210    /**
 211    * Lock used only for its memory effects. We use a Boolean
 212    * because it is serializable, and we create a new one because
 213    * we need a unique object for each cache instance.
 214    **/
 215    protected final Boolean barrierLock = new Boolean(true);
 216   
 217    /**
 218    * field written to only to guarantee lock ordering.
 219    **/
 220    protected transient Object lastWrite;
 221   
 222    /**
 223    * The hash table data.
 224    */
 225    protected transient Entry[] table;
 226   
 227    /**
 228    * The total number of mappings in the hash table.
 229    */
 230    protected transient int count;
 231   
 232    /**
 233    * Persistence listener.
 234    */
 235    protected PersistenceListener persistenceListener = null;
 236   
 237    /**
 238    * Use memory cache or not.
 239    */
 240    protected boolean memoryCaching = true;
 241   
 242    /**
 243    * Use unlimited disk caching.
 244    */
 245    protected boolean unlimitedDiskCache = false;
 246   
 247    /**
 248    * The load factor for the hash table.
 249    *
 250    * @serial
 251    */
 252    protected float loadFactor;
 253   
 254    /**
 255    * Default cache capacity (number of entries).
 256    */
 257    protected final int DEFAULT_MAX_ENTRIES = 100;
 258   
 259    /**
 260    * Max number of element in cache when considered unlimited.
 261    */
 262    protected final int UNLIMITED = 2147483646;
 263    protected transient Collection values = null;
 264   
 265    /**
 266    * A HashMap containing the group information.
 267    * Each entry uses the group name as the key, and holds a
 268    * <code>Set</code> of containing keys of all
 269    * the cache entries that belong to that particular group.
 270    */
 271    protected HashMap groups = new HashMap();
 272    protected transient Set entrySet = null;
 273   
 274    // Views
 275    protected transient Set keySet = null;
 276   
 277    /**
 278    * Cache capacity (number of entries).
 279    */
 280    protected int maxEntries = DEFAULT_MAX_ENTRIES;
 281   
 282    /**
 283    * The table is rehashed when its size exceeds this threshold.
 284    * (The value of this field is always (int)(capacity * loadFactor).)
 285    *
 286    * @serial
 287    */
 288    protected int threshold;
 289   
 290    /**
 291    * Use overflow persistence caching.
 292    */
 293    private boolean overflowPersistence = false;
 294   
 295    /**
 296    * Constructs a new, empty map with the specified initial capacity and the specified load factor.
 297    *
 298    * @param initialCapacity the initial capacity
 299    * The actual initial capacity is rounded to the nearest power of two.
 300    * @param loadFactor the load factor of the AbstractConcurrentReadCache
 301    * @throws IllegalArgumentException if the initial maximum number
 302    * of elements is less
 303    * than zero, or if the load factor is nonpositive.
 304    */
 305  132 public AbstractConcurrentReadCache(int initialCapacity, float loadFactor) {
 306  132 if (loadFactor <= 0) {
 307  0 throw new IllegalArgumentException("Illegal Load factor: " + loadFactor);
 308    }
 309   
 310  132 this.loadFactor = loadFactor;
 311   
 312  132 int cap = p2capacity(initialCapacity);
 313  132 table = new Entry[cap];
 314  132 threshold = (int) (cap * loadFactor);
 315    }
 316   
 317    /**
 318    * Constructs a new, empty map with the specified initial capacity and default load factor.
 319    *
 320    * @param initialCapacity the initial capacity of the
 321    * AbstractConcurrentReadCache.
 322    * @throws IllegalArgumentException if the initial maximum number
 323    * of elements is less
 324    * than zero.
 325    */
 326  0 public AbstractConcurrentReadCache(int initialCapacity) {
 327  0 this(initialCapacity, DEFAULT_LOAD_FACTOR);
 328    }
 329   
 330    /**
 331    * Constructs a new, empty map with a default initial capacity and load factor.
 332    */
 333  132 public AbstractConcurrentReadCache() {
 334  132 this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
 335    }
 336   
 337    /**
 338    * Constructs a new map with the same mappings as the given map.
 339    * The map is created with a capacity of twice the number of mappings in
 340    * the given map or 11 (whichever is greater), and a default load factor.
 341    */
 342  0 public AbstractConcurrentReadCache(Map t) {
 343  0 this(Math.max(2 * t.size(), 11), DEFAULT_LOAD_FACTOR);
 344  0 putAll(t);
 345    }
 346   
 347    /**
 348    * Returns <tt>true</tt> if this map contains no key-value mappings.
 349    *
 350    * @return <tt>true</tt> if this map contains no key-value mappings.
 351    */
 352  0 public synchronized boolean isEmpty() {
 353  0 return count == 0;
 354    }
 355   
 356    /**
 357    * Returns a set of the cache keys that reside in a particular group.
 358    *
 359    * @param groupName The name of the group to retrieve.
 360    * @return a set containing all of the keys of cache entries that belong
 361    * to this group, or <code>null</code> if the group was not found.
 362    * @exception NullPointerException if the groupName is <code>null</code>.
 363    */
 364  36 public Set getGroup(String groupName) {
 365  36 if (log.isDebugEnabled()) {
 366  0 log.debug("getGroup called (group=" + groupName + ")");
 367    }
 368   
 369  36 Set groupEntries = null;
 370   
 371  36 if (memoryCaching && (groups != null)) {
 372  27 groupEntries = (Set) getGroupForReading(groupName);
 373    }
 374   
 375  36 if (groupEntries == null) {
 376    // Not in the map, try the persistence layer
 377  12 groupEntries = persistRetrieveGroup(groupName);
 378    }
 379   
 380  36 return groupEntries;
 381    }
 382   
 383    /**
 384    * Set the cache capacity
 385    */
 386  80 public void setMaxEntries(int newLimit) {
 387  80 if (newLimit > 0) {
 388  64 maxEntries = newLimit;
 389   
 390  64 synchronized (this) { // because remove() isn't synchronized
 391   
 392  64 while (size() > maxEntries) {
 393  16 remove(removeItem(), false);
 394    }
 395    }
 396    } else {
 397    // Capacity must be at least 1
 398  16 throw new IllegalArgumentException("Cache maximum number of entries must be at least 1");
 399    }
 400    }
 401   
/**
 * Retrieve the cache capacity (number of entries).
 *
 * @return the current maximum number of entries.
 */
public int getMaxEntries() {
    return maxEntries;
}
 408   
/**
 * Sets the memory caching flag.
 *
 * @param memoryCaching true to keep values in memory, false to keep
 * only a placeholder in memory and the data on disk.
 */
public void setMemoryCaching(boolean memoryCaching) {
    this.memoryCaching = memoryCaching;
}
 415   
/**
 * Check if memory caching is used.
 *
 * @return true when values are held in memory.
 */
public boolean isMemoryCaching() {
    return memoryCaching;
}
 422   
/**
 * Set the persistence listener to use.
 *
 * @param listener the listener that mirrors cache contents to disk;
 * may be null to disable persistence.
 */
public void setPersistenceListener(PersistenceListener listener) {
    this.persistenceListener = listener;
}
 429   
/**
 * Get the persistence listener.
 *
 * @return the configured listener, or null when none is set.
 */
public PersistenceListener getPersistenceListener() {
    return persistenceListener;
}
 436   
/**
 * Sets the unlimited disk caching flag.
 *
 * @param unlimitedDiskCache true to let the disk cache grow without bound.
 */
public void setUnlimitedDiskCache(boolean unlimitedDiskCache) {
    this.unlimitedDiskCache = unlimitedDiskCache;
}
 443   
/**
 * Check if we use unlimited disk cache.
 *
 * @return true when the disk cache may grow without bound.
 */
public boolean isUnlimitedDiskCache() {
    return unlimitedDiskCache;
}
 450   
/**
 * Check if we use overflowPersistence
 *
 * @return Returns the overflowPersistence.
 */
public boolean isOverflowPersistence() {
    return this.overflowPersistence;
}
 459   
/**
 * Sets the overflowPersistence flag
 *
 * @param overflowPersistence The overflowPersistence to set.
 */
public void setOverflowPersistence(boolean overflowPersistence) {
    this.overflowPersistence = overflowPersistence;
}
 468   
/**
 * Return the number of slots in this table (the bucket-array length,
 * not the number of mappings).
 **/
public synchronized int capacity() {
    return table.length;
}
 475   
/**
 * Removes all mappings from this map, in memory and on disk.
 * The per-entry invalidation (value = null) must happen before the
 * bucket is cleared so that concurrent lock-free get() calls detect
 * the modification and retry instead of returning stale data.
 */
public synchronized void clear() {
    Entry[] tab = table;

    for (int i = 0; i < tab.length; ++i) {
        // must invalidate all to force concurrent get's to wait and then retry
        for (Entry e = tab[i]; e != null; e = e.next) {
            e.value = null;

            /** OpenSymphony BEGIN */
            // Notify the eviction algorithm that this key is gone.
            itemRemoved(e.key);

            /** OpenSymphony END */
        }

        tab[i] = null;
    }

    // Clean out the entire disk cache
    persistClear();

    count = 0;
    // Publish the modification for lock-free readers (memory barrier).
    recordModification(tab);
}
 502   
 503    /**
 504    * Returns a shallow copy of this.
 505    * <tt>AbstractConcurrentReadCache</tt> instance: the keys and
 506    * values themselves are not cloned.
 507    *
 508    * @return a shallow copy of this map.
 509    */
 510  0 public synchronized Object clone() {
 511  0 try {
 512  0 AbstractConcurrentReadCache t = (AbstractConcurrentReadCache) super.clone();
 513  0 t.keySet = null;
 514  0 t.entrySet = null;
 515  0 t.values = null;
 516   
 517  0 Entry[] tab = table;
 518  0 t.table = new Entry[tab.length];
 519   
 520  0 Entry[] ttab = t.table;
 521   
 522  0 for (int i = 0; i < tab.length; ++i) {
 523  0 Entry first = tab[i];
 524   
 525  0 if (first != null) {
 526  0 ttab[i] = (Entry) (first.clone());
 527    }
 528    }
 529   
 530  0 return t;
 531    } catch (CloneNotSupportedException e) {
 532    // this shouldn't happen, since we are Cloneable
 533  0 throw new InternalError();
 534    }
 535    }
 536   
/**
 * Tests if some key maps into the specified value in this table.
 * This operation is more expensive than the <code>containsKey</code>
 * method.<p>
 *
 * Note that this method is identical in functionality to containsValue,
 * (which is part of the Map interface in the collections framework).
 *
 * @param value a value to search for.
 * @return <code>true</code> if and only if some key maps to the
 * <code>value</code> argument in this table as
 * determined by the <tt>equals</tt> method;
 * <code>false</code> otherwise.
 * @exception NullPointerException if the value is <code>null</code>.
 * @see #containsKey(Object)
 * @see #containsValue(Object)
 * @see Map
 */
public boolean contains(Object value) {
    return containsValue(value);
}
 558   
/**
 * Tests if the specified object is a key in this table.
 * Note: because get() falls through to the persistence layer, a key
 * that exists only on disk is also reported as present.
 *
 * @param key possible key.
 * @return <code>true</code> if and only if the specified object
 * is a key in this table, as determined by the
 * <tt>equals</tt> method; <code>false</code> otherwise.
 * @exception NullPointerException if the key is
 * <code>null</code>.
 * @see #contains(Object)
 */
public boolean containsKey(Object key) {
    return get(key) != null;

    /** OpenSymphony BEGIN */

    // TODO: Also check the persistence?

    /** OpenSymphony END */
}
 579   
 580    /**
 581    * Returns <tt>true</tt> if this map maps one or more keys to the
 582    * specified value. Note: This method requires a full internal
 583    * traversal of the hash table, and so is much slower than
 584    * method <tt>containsKey</tt>.
 585    *
 586    * @param value value whose presence in this map is to be tested.
 587    * @return <tt>true</tt> if this map maps one or more keys to the
 588    * specified value.
 589    * @exception NullPointerException if the value is <code>null</code>.
 590    */
 591  0 public boolean containsValue(Object value) {
 592  0 if (value == null) {
 593  0 throw new NullPointerException();
 594    }
 595   
 596  0 Entry[] tab = getTableForReading();
 597   
 598  0 for (int i = 0; i < tab.length; ++i) {
 599  0 for (Entry e = tab[i]; e != null; e = e.next) {
 600  0 Object v = e.value;
 601   
 602  0 if ((v != null) && value.equals(v)) {
 603  0 return true;
 604    }
 605    }
 606    }
 607   
 608  0 return false;
 609    }
 610   
/**
 * Returns an enumeration of the values in this table.
 * Use the Enumeration methods on the returned object to fetch the elements
 * sequentially.
 *
 * @return an enumeration of the values in this table.
 * @see java.util.Enumeration
 * @see #keys()
 * @see #values()
 * @see Map
 */
public Enumeration elements() {
    return new ValueIterator();
}
 625   
/**
 * Returns a collection view of the mappings contained in this map.
 * Each element in the returned collection is a <tt>Map.Entry</tt>. The
 * collection is backed by the map, so changes to the map are reflected in
 * the collection, and vice-versa. The collection supports element
 * removal, which removes the corresponding mapping from the map, via the
 * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
 * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
 * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
 *
 * The view is created lazily and cached in the entrySet field.
 * NOTE(review): the lazy init is unsynchronized, so two threads may
 * each build a view; both remain valid since they delegate to the map.
 *
 * @return a collection view of the mappings contained in this map.
 */
public Set entrySet() {
    Set es = entrySet;

    if (es != null) {
        return es;
    } else {
        return entrySet = new AbstractSet() {
            public Iterator iterator() {
                return new HashIterator();
            }

            public boolean contains(Object o) {
                if (!(o instanceof Map.Entry)) {
                    return false;
                }

                Map.Entry entry = (Map.Entry) o;
                Object key = entry.getKey();
                Object v = AbstractConcurrentReadCache.this.get(key);

                return (v != null) && v.equals(entry.getValue());
            }

            public boolean remove(Object o) {
                if (!(o instanceof Map.Entry)) {
                    return false;
                }

                return AbstractConcurrentReadCache.this.findAndRemoveEntry((Map.Entry) o);
            }

            public int size() {
                return AbstractConcurrentReadCache.this.size();
            }

            public void clear() {
                AbstractConcurrentReadCache.this.clear();
            }
        };
    }
}
 679   
/**
 * Returns the value to which the specified key is mapped in this table.
 * Reads are optimistic and usually lock-free: misses and the NULL
 * placeholder fall through to the persistence layer.
 *
 * @param key a key in the table.
 * @return the value to which the key is mapped in this table;
 * <code>null</code> if the key is not mapped to any value in
 * this table.
 * @exception NullPointerException if the key is
 * <code>null</code>.
 * @see #put(Object, Object)
 */
public Object get(Object key) {
    if (log.isDebugEnabled()) {
        log.debug("get called (key=" + key + ")");
    }

    // throw null pointer exception if key null
    int hash = hash(key);

    /*
     Start off at the apparently correct bin. If entry is found, we
     need to check after a barrier anyway. If not found, we need a
     barrier to check if we are actually in right bin. So either
     way, we encounter only one barrier unless we need to retry.
     And we only need to fully synchronize if there have been
     concurrent modifications.
     */
    Entry[] tab = table;
    int index = hash & (tab.length - 1);
    Entry first = tab[index];
    Entry e = first;

    for (;;) {
        if (e == null) {
            // If key apparently not there, check to
            // make sure this was a valid read
            tab = getTableForReading();

            if (first == tab[index]) {
                /** OpenSymphony BEGIN */

                /* Previous code
                return null;*/

                // Not in the table, try persistence
                Object value = persistRetrieve(key);

                if (value != null) {
                    // Update the map, but don't persist the data
                    put(key, value, false);
                }

                return value;

                /** OpenSymphony END */
            } else {
                // Wrong list -- must restart traversal at new first
                e = first = tab[index = hash & (tab.length - 1)];
            }
        }
        // checking for pointer equality first wins in most applications
        else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
            Object value = e.value;

            if (value != null) {
                /** OpenSymphony BEGIN */

                /* Previous code
                return value;*/
                if (NULL.equals(value)) {
                    // Memory cache disabled: the placeholder means the
                    // real value lives on disk — fetch it from there.
                    value = persistRetrieve(e.key);

                    if (value != null) {
                        itemRetrieved(key);
                    }

                    return value; // fix [CACHE-13]
                } else {
                    itemRetrieved(key);

                    return value;
                }

                /** OpenSymphony END */
            }

            // Entry was invalidated during deletion. But it could
            // have been re-inserted, so we must retraverse.
            // To avoid useless contention, get lock to wait out modifications
            // before retraversing.
            synchronized (this) {
                tab = table;
            }

            e = first = tab[index = hash & (tab.length - 1)];
        } else {
            e = e.next;
        }
    }
}
 781   
/**
 * Returns a set view of the keys contained in this map.
 * The set is backed by the map, so changes to the map are reflected in the set, and
 * vice-versa. The set supports element removal, which removes the
 * corresponding mapping from this map, via the <tt>Iterator.remove</tt>,
 * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and
 * <tt>clear</tt> operations. It does not support the <tt>add</tt> or
 * <tt>addAll</tt> operations.
 *
 * The view is created lazily and cached in the keySet field.
 *
 * @return a set view of the keys contained in this map.
 */
public Set keySet() {
    Set ks = keySet;

    if (ks != null) {
        return ks;
    } else {
        return keySet = new AbstractSet() {
            public Iterator iterator() {
                return new KeyIterator();
            }

            public int size() {
                return AbstractConcurrentReadCache.this.size();
            }

            public boolean contains(Object o) {
                return AbstractConcurrentReadCache.this.containsKey(o);
            }

            public boolean remove(Object o) {
                return AbstractConcurrentReadCache.this.remove(o) != null;
            }

            public void clear() {
                AbstractConcurrentReadCache.this.clear();
            }
        };
    }
}
 822   
/**
 * Returns an enumeration of the keys in this table.
 *
 * @return an enumeration of the keys in this table.
 * @see Enumeration
 * @see #elements()
 * @see #keySet()
 * @see Map
 */
public Enumeration keys() {
    return new KeyIterator();
}
 835   
/**
 * Return the load factor this table was constructed with.
 **/
public float loadFactor() {
    return loadFactor;
}
 842   
/**
 * Maps the specified <code>key</code> to the specified <code>value</code> in this table.
 * Neither the key nor the
 * value can be <code>null</code>. <p>
 *
 * The value can be retrieved by calling the <code>get</code> method
 * with a key that is equal to the original key.
 *
 * @param key the table key.
 * @param value the value.
 * @return the previous value of the specified key in this table,
 * or <code>null</code> if it did not have one.
 * @exception NullPointerException if the key or value is
 * <code>null</code>.
 * @see Object#equals(Object)
 * @see #get(Object)
 */
/** OpenSymphony BEGIN */
public Object put(Object key, Object value) {
    // Call the internal put using persistence
    return put(key, value, true);
}
 865   
 866    /**
 867    * Copies all of the mappings from the specified map to this one.
 868    *
 869    * These mappings replace any mappings that this map had for any of the
 870    * keys currently in the specified Map.
 871    *
 872    * @param t Mappings to be stored in this map.
 873    */
 874  0 public synchronized void putAll(Map t) {
 875  0 for (Iterator it = t.entrySet().iterator(); it.hasNext();) {
 876  0 Map.Entry entry = (Map.Entry) it.next();
 877  0 Object key = entry.getKey();
 878  0 Object value = entry.getValue();
 879  0 put(key, value);
 880    }
 881    }
 882   
/**
 * Removes the key (and its corresponding value) from this table.
 * This method does nothing if the key is not in the table.
 *
 * @param key the key that needs to be removed.
 * @return the value to which the key had been mapped in this table,
 * or <code>null</code> if the key did not have a mapping.
 * @exception NullPointerException if the key is
 * <code>null</code>.
 */
/** OpenSymphony BEGIN */
public Object remove(Object key) {
    // true = also remove the entry from the persistence layer.
    return remove(key, true);
}
 897   
/**
 * Returns the total number of cache entries held in this map.
 * Synchronized so the count read is consistent with in-flight writes.
 *
 * @return the number of key-value mappings in this map.
 */
public synchronized int size() {
    return count;
}
 906   
/**
 * Returns a collection view of the values contained in this map.
 * The collection is backed by the map, so changes to the map are reflected in
 * the collection, and vice-versa. The collection supports element
 * removal, which removes the corresponding mapping from this map, via the
 * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
 * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
 * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
 *
 * The view is created lazily and cached in the values field.
 *
 * @return a collection view of the values contained in this map.
 */
public Collection values() {
    Collection vs = values;

    if (vs != null) {
        return vs;
    } else {
        return values = new AbstractCollection() {
            public Iterator iterator() {
                return new ValueIterator();
            }

            public int size() {
                return AbstractConcurrentReadCache.this.size();
            }

            public boolean contains(Object o) {
                return AbstractConcurrentReadCache.this.containsValue(o);
            }

            public void clear() {
                AbstractConcurrentReadCache.this.clear();
            }
        };
    }
}
 943   
 944    /**
 945    * Get ref to group.
 946    * CACHE-127 Synchronized copying of the group entry set since
 947    * the new HashSet(Collection c) constructor uses the iterator.
 948    * This may slow things down but it is better than a
 949    * ConcurrentModificationException. We might have to revisit the
 950    * code if performance is too adversely impacted.
 951    **/
 952  27 protected synchronized final Set getGroupForReading(String groupName) {
 953  27 Set group = (Set) getGroupsForReading().get(groupName);
 954  3 if (group == null) return null;
 955  24 return new HashSet(group);
 956    }
 957   
    /**
     * Get ref to groups.
     * The reference and the cells it accesses will be at least as fresh
     * as from the last use of barrierLock: synchronizing on the shared
     * barrierLock forces a memory synchronization point for readers.
     **/
    protected final Map getGroupsForReading() {
        synchronized (barrierLock) {
            return groups;
        }
    }
 969   
    /**
     * Get ref to table; the reference and the cells it
     * accesses will be at least as fresh as from the last
     * use of barrierLock (see recordModification, which publishes
     * writes through the same lock).
     **/
    protected final Entry[] getTableForReading() {
        synchronized (barrierLock) {
            return table;
        }
    }
 980   
    /**
     * Force a memory synchronization that will cause
     * all readers to see table. Call only when already
     * holding main synch lock. Writing <tt>lastWrite</tt> under
     * barrierLock pairs with the reads in getTableForReading /
     * getGroupsForReading to publish the modification.
     **/
    protected final void recordModification(Object x) {
        synchronized (barrierLock) {
            lastWrite = x;
        }
    }
 991   
 992    /**
 993    * Helper method for entrySet remove.
 994    **/
 995  0 protected synchronized boolean findAndRemoveEntry(Map.Entry entry) {
 996  0 Object key = entry.getKey();
 997  0 Object v = get(key);
 998   
 999  0 if ((v != null) && v.equals(entry.getValue())) {
 1000  0 remove(key);
 1001   
 1002  0 return true;
 1003    } else {
 1004  0 return false;
 1005    }
 1006    }
 1007   
 1008    /**
 1009    * Remove an object from the persistence.
 1010    * @param key The key of the object to remove
 1011    */
 1012  7245 protected void persistRemove(Object key) {
 1013  7245 if (log.isDebugEnabled()) {
 1014  0 log.debug("PersistRemove called (key=" + key + ")");
 1015    }
 1016   
 1017  7245 if (persistenceListener != null) {
 1018  21 try {
 1019  21 persistenceListener.remove((String) key);
 1020    } catch (CachePersistenceException e) {
 1021  0 log.error("[oscache] Exception removing cache entry with key '" + key + "' from persistence", e);
 1022    }
 1023    }
 1024    }
 1025   
 1026    /**
 1027    * Removes a cache group using the persistence listener.
 1028    * @param groupName The name of the group to remove
 1029    */
 1030  0 protected void persistRemoveGroup(String groupName) {
 1031  0 if (log.isDebugEnabled()) {
 1032  0 log.debug("persistRemoveGroup called (groupName=" + groupName + ")");
 1033    }
 1034   
 1035  0 if (persistenceListener != null) {
 1036  0 try {
 1037  0 persistenceListener.removeGroup(groupName);
 1038    } catch (CachePersistenceException e) {
 1039  0 log.error("[oscache] Exception removing group " + groupName, e);
 1040    }
 1041    }
 1042    }
 1043   
 1044    /**
 1045    * Retrieve an object from the persistence listener.
 1046    * @param key The key of the object to retrieve
 1047    */
 1048  16460 protected Object persistRetrieve(Object key) {
 1049  16460 if (log.isDebugEnabled()) {
 1050  0 log.debug("persistRetrieve called (key=" + key + ")");
 1051    }
 1052   
 1053  16460 Object entry = null;
 1054   
 1055  16460 if (persistenceListener != null) {
 1056  380 try {
 1057  380 entry = persistenceListener.retrieve((String) key);
 1058    } catch (CachePersistenceException e) {
 1059    /**
 1060    * It is normal that we get an exception occasionally.
 1061    * It happens when the item is invalidated (written or removed)
 1062    * during read. The logic is constructed so that read is retried.
 1063    */
 1064    }
 1065    }
 1066   
 1067  16460 return entry;
 1068    }
 1069   
 1070    /**
 1071    * Retrieves a cache group using the persistence listener.
 1072    * @param groupName The name of the group to retrieve
 1073    */
 1074  148 protected Set persistRetrieveGroup(String groupName) {
 1075  148 if (log.isDebugEnabled()) {
 1076  0 log.debug("persistRetrieveGroup called (groupName=" + groupName + ")");
 1077    }
 1078   
 1079  148 if (persistenceListener != null) {
 1080  109 try {
 1081  109 return persistenceListener.retrieveGroup(groupName);
 1082    } catch (CachePersistenceException e) {
 1083  0 log.error("[oscache] Exception retrieving group " + groupName, e);
 1084    }
 1085    }
 1086   
 1087  39 return null;
 1088    }
 1089   
 1090    /**
 1091    * Store an object in the cache using the persistence listener.
 1092    * @param key The object key
 1093    * @param obj The object to store
 1094    */
 1095  12444 protected void persistStore(Object key, Object obj) {
 1096  12444 if (log.isDebugEnabled()) {
 1097  0 log.debug("persistStore called (key=" + key + ")");
 1098    }
 1099   
 1100  12444 if (persistenceListener != null) {
 1101  182 try {
 1102  182 persistenceListener.store((String) key, obj);
 1103    } catch (CachePersistenceException e) {
 1104  0 log.error("[oscache] Exception persisting " + key, e);
 1105    }
 1106    }
 1107    }
 1108   
 1109    /**
 1110    * Creates or Updates a cache group using the persistence listener.
 1111    * @param groupName The name of the group to update
 1112    * @param group The entries for the group
 1113    */
 1114  130 protected void persistStoreGroup(String groupName, Set group) {
 1115  130 if (log.isDebugEnabled()) {
 1116  0 log.debug("persistStoreGroup called (groupName=" + groupName + ")");
 1117    }
 1118   
 1119  130 if (persistenceListener != null) {
 1120  98 try {
 1121  98 if ((group == null) || group.isEmpty()) {
 1122  0 persistenceListener.removeGroup(groupName);
 1123    } else {
 1124  98 persistenceListener.storeGroup(groupName, group);
 1125    }
 1126    } catch (CachePersistenceException e) {
 1127  0 log.error("[oscache] Exception persisting group " + groupName, e);
 1128    }
 1129    }
 1130    }
 1131   
 1132    /**
 1133    * Removes the entire cache from persistent storage.
 1134    */
 1135  204 protected void persistClear() {
 1136  204 if (log.isDebugEnabled()) {
 1137  0 log.debug("persistClear called");
 1138    ;
 1139    }
 1140   
 1141  204 if (persistenceListener != null) {
 1142  85 try {
 1143  85 persistenceListener.clear();
 1144    } catch (CachePersistenceException e) {
 1145  0 log.error("[oscache] Exception clearing persistent cache", e);
 1146    }
 1147    }
 1148    }
 1149   
    /**
     * Notify the underlying implementation that an item was put in the cache.
     * Invoked from the put/sput paths while the table lock is held.
     *
     * @param key The cache key of the item that was put.
     */
    protected abstract void itemPut(Object key);
 1156   
    /**
     * Notify any underlying algorithm that an item has been retrieved from the cache.
     *
     * @param key The cache key of the item that was retrieved.
     */
    protected abstract void itemRetrieved(Object key);
 1163   
    /**
     * Notify the underlying implementation that an item was removed from the cache.
     * Invoked from the remove/sremove paths when eviction notification is requested.
     *
     * @param key The cache key of the item that was removed.
     */
    protected abstract void itemRemoved(Object key);
 1170   
    /**
     * The cache has reached its capacity and an item needs to be removed
     * (typically according to an algorithm such as LRU or FIFO).
     *
     * @return The key of whichever item was removed.
     */
    protected abstract Object removeItem();
 1178   
 1179    /**
 1180    * Reconstitute the <tt>AbstractConcurrentReadCache</tt>.
 1181    * instance from a stream (i.e.,
 1182    * deserialize it).
 1183    */
 1184  0 private synchronized void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException {
 1185    // Read in the threshold, loadfactor, and any hidden stuff
 1186  0 s.defaultReadObject();
 1187   
 1188    // Read in number of buckets and allocate the bucket array;
 1189  0 int numBuckets = s.readInt();
 1190  0 table = new Entry[numBuckets];
 1191   
 1192    // Read in size (number of Mappings)
 1193  0 int size = s.readInt();
 1194   
 1195    // Read the keys and values, and put the mappings in the table
 1196  0 for (int i = 0; i < size; i++) {
 1197  0 Object key = s.readObject();
 1198  0 Object value = s.readObject();
 1199  0 put(key, value);
 1200    }
 1201    }
 1202   
    /**
     * Rehashes the contents of this map into a new table with a larger capacity.
     * This method is called automatically when the
     * number of keys in this map exceeds its capacity and load factor.
     * Must be called with the main synch lock held (it writes table,
     * threshold and calls recordModification).
     */
    protected void rehash() {
        Entry[] oldMap = table;
        int oldCapacity = oldMap.length;

        // Capacity is capped; silently refuse to grow past the maximum.
        if (oldCapacity >= MAXIMUM_CAPACITY) {
            return;
        }

        // Power-of-two doubling keeps (hash & (len - 1)) indexing valid.
        int newCapacity = oldCapacity << 1;
        Entry[] newMap = new Entry[newCapacity];
        threshold = (int) (newCapacity * loadFactor);

        /*
        We need to guarantee that any existing reads of oldMap can
        proceed. So we cannot yet null out each oldMap bin.

        Because we are using power-of-two expansion, the elements
        from each bin must either stay at same index, or move
        to oldCapacity+index. We also minimize new node creation by
        catching cases where old nodes can be reused because their
        .next fields won't change. (This is checked only for sequences
        of one and two. It is not worth checking longer ones.)
        */
        for (int i = 0; i < oldCapacity; ++i) {
            // l = chain staying at index i, h = chain moving to oldCapacity+i.
            Entry l = null;
            Entry h = null;
            Entry e = oldMap[i];

            while (e != null) {
                int hash = e.hash;
                Entry next = e.next;

                // The bit selected by oldCapacity decides which half the
                // node lands in after doubling.
                if ((hash & oldCapacity) == 0) {
                    // stays at newMap[i]
                    if (l == null) {
                        // try to reuse node: safe only when the remainder of
                        // the chain (checked to depth two) lands in the same
                        // half, so e.next stays valid.
                        if ((next == null) || ((next.next == null) && ((next.hash & oldCapacity) == 0))) {
                            l = e;

                            break;
                        }
                    }

                    // Otherwise clone the node; clones are prepended, so the
                    // rebuilt chain is in reverse of the original order.
                    l = new Entry(hash, e.key, e.value, l);
                } else {
                    // moves to newMap[oldCapacity+i]
                    if (h == null) {
                        if ((next == null) || ((next.next == null) && ((next.hash & oldCapacity) != 0))) {
                            h = e;

                            break;
                        }
                    }

                    h = new Entry(hash, e.key, e.value, h);
                }

                e = next;
            }

            newMap[i] = l;
            newMap[oldCapacity + i] = h;
        }

        // Publish the new table to concurrent readers via the barrier lock.
        table = newMap;
        recordModification(newMap);
    }
 1275   
    /**
     * Continuation of put(), called only when the synch lock is
     * held and interference has been detected (the bucket head moved
     * while the caller was traversing without the lock).
     *
     * @param key the entry key (never null at this point).
     * @param value the value to store.
     * @param hash the precomputed spread hash for key.
     * @param persist whether to write through to the persistence layer.
     * @return the previous value, or null if there was no mapping.
     **/
    /** OpenSymphony BEGIN */
    protected Object sput(Object key, Object value, int hash, boolean persist) {
    /** OpenSymphony END */
        Entry[] tab = table;
        int index = hash & (tab.length - 1);
        Entry first = tab[index];
        Entry e = first;

        for (;;) {
            if (e == null) {
                // Key not present: build a new bucket head. When memory
                // caching is off, the sentinel NULL is stored instead of the
                // real value (the value then lives only in persistence).
                Entry newEntry;

                if (memoryCaching) {
                    newEntry = new Entry(hash, key, value, first);
                } else {
                    newEntry = new Entry(hash, key, NULL, first);
                }

                itemPut(key);

                // Persist if required (overflow-only persistence defers the
                // disk write until the entry is evicted from memory).
                if (persist && !overflowPersistence) {
                    persistStore(key, value);
                }

                // If we have a CacheEntry, update the group lookups
                if (value instanceof CacheEntry) {
                    updateGroups(null, (CacheEntry) value, persist);
                }

                tab[index] = newEntry;

                if (++count >= threshold) {
                    rehash();
                } else {
                    // rehash() records its own modification; otherwise publish
                    // the new entry to concurrent readers here.
                    recordModification(newEntry);
                }

                return null;
            } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
                // Key found: replace in place.
                Object oldValue = e.value;

                if (memoryCaching) {
                    e.value = value;
                }

                // Persist if required
                if (persist && overflowPersistence) {
                    persistRemove(key);
                } else if (persist) {
                    persistStore(key, value);
                }

                updateGroups(oldValue, value, persist);

                itemPut(key);

                return oldValue;
            } else {
                e = e.next;
            }
        }
    }
 1356   
    /**
     * Continuation of remove(), called only when the synch lock is
     * held and interference has been detected (the bucket head moved
     * while the caller was traversing without the lock).
     *
     * @param key the key to remove.
     * @param hash the precomputed spread hash for key.
     * @param invokeAlgorithm whether to notify the eviction algorithm.
     * @return the removed value, or null if there was no mapping.
     **/
    /** OpenSymphony BEGIN */
    protected Object sremove(Object key, int hash, boolean invokeAlgorithm) {
    /** OpenSymphony END */
        Entry[] tab = table;
        int index = hash & (tab.length - 1);
        Entry first = tab[index];
        Entry e = first;

        for (;;) {
            if (e == null) {
                return null;
            } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
                Object oldValue = e.value;

                // Nulling the value first forces any concurrent get() on this
                // entry to retry under the lock.
                e.value = null;
                count--;

                // With an unlimited disk cache or overflow-only persistence
                // the disk copy is deliberately kept.
                if (!unlimitedDiskCache && !overflowPersistence) {
                    persistRemove(e.key);
                }

                // Overflow persistence: an eviction at capacity spills the
                // value to disk instead of dropping it.
                if (overflowPersistence && ((size() + 1) >= maxEntries)) {
                    persistStore(key, oldValue);
                }

                if (invokeAlgorithm) {
                    itemRemoved(key);
                }

                // Rebuild the chain without e: entries after e are shared,
                // entries before e are cloned (see remove() strategy note).
                Entry head = e.next;

                for (Entry p = first; p != e; p = p.next) {
                    head = new Entry(p.hash, p.key, p.value, head);
                }

                tab[index] = head;
                recordModification(head);

                return oldValue;
            } else {
                e = e.next;
            }
        }
    }
 1409   
 1410    /**
 1411    * Save the state of the <tt>AbstractConcurrentReadCache</tt> instance to a stream.
 1412    * (i.e., serialize it).
 1413    *
 1414    * @serialData The <i>capacity</i> of the
 1415    * AbstractConcurrentReadCache (the length of the
 1416    * bucket array) is emitted (int), followed by the
 1417    * <i>size</i> of the AbstractConcurrentReadCache (the number of key-value
 1418    * mappings), followed by the key (Object) and value (Object)
 1419    * for each key-value mapping represented by the AbstractConcurrentReadCache
 1420    * The key-value mappings are emitted in no particular order.
 1421    */
 1422  0 private synchronized void writeObject(java.io.ObjectOutputStream s) throws IOException {
 1423    // Write out the threshold, loadfactor, and any hidden stuff
 1424  0 s.defaultWriteObject();
 1425   
 1426    // Write out number of buckets
 1427  0 s.writeInt(table.length);
 1428   
 1429    // Write out size (number of Mappings)
 1430  0 s.writeInt(count);
 1431   
 1432    // Write out keys and values (alternating)
 1433  0 for (int index = table.length - 1; index >= 0; index--) {
 1434  0 Entry entry = table[index];
 1435   
 1436  0 while (entry != null) {
 1437  0 s.writeObject(entry.key);
 1438  0 s.writeObject(entry.value);
 1439  0 entry = entry.next;
 1440    }
 1441    }
 1442    }
 1443   
 1444    /**
 1445    * Return hash code for Object x.
 1446    * Since we are using power-of-two
 1447    * tables, it is worth the effort to improve hashcode via
 1448    * the same multiplicative scheme as used in IdentityHashMap.
 1449    */
 1450  2044761 private static int hash(Object x) {
 1451  2043822 int h = x.hashCode();
 1452   
 1453    // Multiply by 127 (quickly, via shifts), and mix in some high
 1454    // bits to help guard against bunching of codes that are
 1455    // consecutive or equally spaced.
 1456  2044690 return ((h << 7) - h + (h >>> 9) + (h >>> 17));
 1457    }
 1458   
 1459    /**
 1460    * Add this cache key to the groups specified groups.
 1461    * We have to treat the
 1462    * memory and disk group mappings seperately so they remain valid for their
 1463    * corresponding memory/disk caches. (eg if mem is limited to 100 entries
 1464    * and disk is unlimited, the group mappings will be different).
 1465    *
 1466    * @param key The cache key that we are ading to the groups.
 1467    * @param newGroups the set of groups we want to add this cache entry to.
 1468    * @param persist A flag to indicate whether the keys should be added to
 1469    * the persistent cache layer.
 1470    */
 1471  152 private void addGroupMappings(String key, Set newGroups, boolean persist) {
 1472    // Add this CacheEntry to the groups that it is now a member of
 1473  152 for (Iterator it = newGroups.iterator(); it.hasNext();) {
 1474  142 String groupName = (String) it.next();
 1475   
 1476    // Update the in-memory groups
 1477  142 if (memoryCaching) {
 1478  102 if (groups == null) {
 1479  0 groups = new HashMap();
 1480    }
 1481   
 1482  102 Set memoryGroup = (Set) groups.get(groupName);
 1483   
 1484  102 if (memoryGroup == null) {
 1485  33 memoryGroup = new HashSet();
 1486  33 groups.put(groupName, memoryGroup);
 1487    }
 1488   
 1489  102 memoryGroup.add(key);
 1490    }
 1491   
 1492    // Update the persistent group maps
 1493  142 if (persist) {
 1494  112 Set persistentGroup = persistRetrieveGroup(groupName);
 1495   
 1496  112 if (persistentGroup == null) {
 1497  47 persistentGroup = new HashSet();
 1498    }
 1499   
 1500  112 persistentGroup.add(key);
 1501  112 persistStoreGroup(groupName, persistentGroup);
 1502    }
 1503    }
 1504    }
 1505   
 1506    /** OpenSymphony END (pretty long!) */
 1507    /**
 1508    * Returns the appropriate capacity (power of two) for the specified
 1509    * initial capacity argument.
 1510    */
 1511  132 private int p2capacity(int initialCapacity) {
 1512  132 int cap = initialCapacity;
 1513   
 1514    // Compute the appropriate capacity
 1515  132 int result;
 1516   
 1517  132 if ((cap > MAXIMUM_CAPACITY) || (cap < 0)) {
 1518  0 result = MAXIMUM_CAPACITY;
 1519    } else {
 1520  132 result = MINIMUM_CAPACITY;
 1521   
 1522  132 while (result < cap) {
 1523  396 result <<= 1;
 1524    }
 1525    }
 1526   
 1527  132 return result;
 1528    }
 1529   
    /**
     * Internal put: inserts or replaces a mapping, optionally writing
     * through to the persistence layer.
     *
     * Lock-free read path with a synchronized write path: the bucket is
     * traversed without the lock, and the lock is only taken once the
     * insertion/replacement point is found. If the bucket head changed in
     * the meantime, the work is redone under the lock via sput().
     *
     * @param key the entry key.
     * @param value the value to store; must not be null.
     * @param persist whether to write through to the persistence layer.
     * @return the previous value, or null if there was no mapping.
     *         NOTE(review): on a fresh insert this returns the new Entry
     *         object rather than null (sput() returns null in the same
     *         case) — this deviates from the Map.put contract documented
     *         on the public put(); confirm whether callers rely on it.
     * @exception NullPointerException if value is null.
     */
    private Object put(Object key, Object value, boolean persist) {
        if (value == null) {
            throw new NullPointerException();
        }

        int hash = hash(key);
        Entry[] tab = table;
        int index = hash & (tab.length - 1);
        Entry first = tab[index];
        Entry e = first;

        for (;;) {
            if (e == null) {
                // Key not found on the unlocked traversal: take the lock.
                synchronized (this) {
                    tab = table;

                    // Remove an item if the cache is full.
                    if (size() >= maxEntries) {
                        remove(removeItem(), false);
                    }

                    if (first == tab[index]) {
                        // Bucket unchanged: add to front of list. When memory
                        // caching is off, store the NULL sentinel instead of
                        // the real value.
                        Entry newEntry = null;

                        if (memoryCaching) {
                            newEntry = new Entry(hash, key, value, first);
                        } else {
                            newEntry = new Entry(hash, key, NULL, first);
                        }

                        tab[index] = newEntry;
                        itemPut(key);

                        // Persist if required (overflow-only persistence
                        // defers the disk write until eviction).
                        if (persist && !overflowPersistence) {
                            persistStore(key, value);
                        }

                        // If we have a CacheEntry, update the group lookups.
                        if (value instanceof CacheEntry) {
                            updateGroups(null, (CacheEntry) value, persist);
                        }

                        if (++count >= threshold) {
                            rehash();
                        } else {
                            // rehash() records its own modification.
                            recordModification(newEntry);
                        }

                        return newEntry;
                    } else {
                        // Wrong list (head moved) -- retry under the lock.
                        return sput(key, value, hash, persist);
                    }
                }
            } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
                // Synch to avoid race with remove and to
                // ensure proper serialization of multiple replaces.
                synchronized (this) {
                    tab = table;

                    Object oldValue = e.value;

                    // [CACHE-118] - get the old cache entry even if there's no memory cache
                    if (persist && (oldValue == NULL)) {
                        oldValue = persistRetrieve(key);
                    }

                    if ((first == tab[index]) && (oldValue != null)) {
                        // Still on the right list and the entry was not
                        // concurrently removed (removed entries have a null
                        // value): replace in place.
                        if (memoryCaching) {
                            e.value = value;
                        }

                        // Persist if required.
                        if (persist && overflowPersistence) {
                            persistRemove(key);
                        } else if (persist) {
                            persistStore(key, value);
                        }

                        updateGroups(oldValue, value, persist);
                        itemPut(key);

                        return oldValue;
                    } else {
                        // Retry if wrong list or lost race against concurrent remove.
                        return sput(key, value, hash, persist);
                    }
                }
            } else {
                e = e.next;
            }
        }
    }
 1663   
    /**
     * Internal remove: deletes a mapping, maintaining the persistence layer
     * and optionally notifying the eviction algorithm.
     *
     * Strategy:
     *
     * Find the entry, then
     * 1. Set value field to null, to force get() to retry
     * 2. Rebuild the list without this entry.
     * All entries following removed node can stay in list, but
     * all preceding ones need to be cloned. Traversals rely
     * on this strategy to ensure that elements will not be
     * repeated during iteration.
     *
     * @param key the key to remove; null is tolerated and returns null.
     * @param invokeAlgorithm whether to call itemRemoved() for the key.
     * @return the removed value, or null if there was no mapping.
     */
    private synchronized Object remove(Object key, boolean invokeAlgorithm) {
        /** OpenSymphony BEGIN */
        if (key == null) {
            return null;
        }

        /** OpenSymphony END */
        int hash = hash(key);
        Entry[] tab = table;
        int index = hash & (tab.length - 1);
        Entry first = tab[index];
        Entry e = first;

        for (;;) {
            if (e == null) {
                // Not found: re-read the table through the barrier lock to
                // check whether we traversed a stale list.
                tab = getTableForReading();

                if (first == tab[index]) {
                    return null;
                } else {
                    // Wrong list -- must restart traversal at new first.
                    return sremove(key, hash, invokeAlgorithm);
                }
            } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
                synchronized (this) {
                    tab = table;

                    Object oldValue = e.value;

                    // Re-find under synch if wrong list or if the entry was
                    // already removed (null value).
                    if ((first != tab[index]) || (oldValue == null)) {
                        return sremove(key, hash, invokeAlgorithm);
                    }

                    // Step 1: null the value so concurrent get() retries.
                    e.value = null;
                    count--;

                    /** OpenSymphony BEGIN */
                    // With an unlimited disk cache or overflow-only
                    // persistence the disk copy is deliberately kept.
                    if (!unlimitedDiskCache && !overflowPersistence) {
                        persistRemove(e.key);
                    }

                    // Overflow persistence: an eviction at capacity spills
                    // the value to disk instead of dropping it.
                    if (overflowPersistence && ((size() + 1) >= maxEntries)) {
                        persistStore(key, oldValue);
                    }

                    if (invokeAlgorithm) {
                        itemRemoved(key);
                    }

                    /** OpenSymphony END */
                    // Step 2: rebuild the chain without e. Entries after e
                    // are shared; entries before e are cloned.
                    Entry head = e.next;

                    for (Entry p = first; p != e; p = p.next) {
                        head = new Entry(p.hash, p.key, p.value, head);
                    }

                    tab[index] = head;
                    recordModification(head);

                    return oldValue;
                }
            } else {
                e = e.next;
            }
        }
    }
 1759   
 1760    /**
 1761    * Remove this CacheEntry from the groups it no longer belongs to.
 1762    * We have to treat the memory and disk group mappings seperately so they remain
 1763    * valid for their corresponding memory/disk caches. (eg if mem is limited
 1764    * to 100 entries and disk is unlimited, the group mappings will be
 1765    * different).
 1766    *
 1767    * @param key The cache key that we are removing from the groups.
 1768    * @param oldGroups the set of groups we want to remove the cache entry
 1769    * from.
 1770    * @param persist A flag to indicate whether the keys should be removed
 1771    * from the persistent cache layer.
 1772    */
 1773  80 private void removeGroupMappings(String key, Set oldGroups, boolean persist) {
 1774  80 for (Iterator it = oldGroups.iterator(); it.hasNext();) {
 1775  24 String groupName = (String) it.next();
 1776   
 1777    // Update the in-memory groups
 1778  24 if (memoryCaching && (this.groups != null)) {
 1779  18 Set memoryGroup = (Set) groups.get(groupName);
 1780   
 1781  18 if (memoryGroup != null) {
 1782  18 memoryGroup.remove(key);
 1783   
 1784  18 if (memoryGroup.isEmpty()) {
 1785  0 groups.remove(groupName);
 1786    }
 1787    }
 1788    }
 1789   
 1790    // Update the persistent group maps
 1791  24 if (persist) {
 1792  24 Set persistentGroup = persistRetrieveGroup(groupName);
 1793   
 1794  24 if (persistentGroup != null) {
 1795  18 persistentGroup.remove(key);
 1796   
 1797  18 if (persistentGroup.isEmpty()) {
 1798  0 persistRemoveGroup(groupName);
 1799    } else {
 1800  18 persistStoreGroup(groupName, persistentGroup);
 1801    }
 1802    }
 1803    }
 1804    }
 1805    }
 1806   
 1807    /**
 1808    * Updates the groups to reflect the differences between the old and new
 1809    * cache entries. Either of the old or new values can be <code>null</code>
 1810    * or contain a <code>null</code> group list, in which case the entry's
 1811    * groups will all be added or removed respectively.
 1812    *
 1813    * @param oldValue The old CacheEntry that is being replaced.
 1814    * @param newValue The new CacheEntry that is being inserted.
 1815    */
 1816  4106 private void updateGroups(Object oldValue, Object newValue, boolean persist) {
 1817    // If we have/had a CacheEntry, update the group lookups
 1818  4106 boolean oldIsCE = oldValue instanceof CacheEntry;
 1819  4106 boolean newIsCE = newValue instanceof CacheEntry;
 1820   
 1821  4106 if (newIsCE && oldIsCE) {
 1822  4106 updateGroups((CacheEntry) oldValue, (CacheEntry) newValue, persist);
 1823  0 } else if (newIsCE) {
 1824  0 updateGroups(null, (CacheEntry) newValue, persist);
 1825  0 } else if (oldIsCE) {
 1826  0 updateGroups((CacheEntry) oldValue, null, persist);
 1827    }
 1828    }
 1829   
 1830    /**
 1831    * Updates the groups to reflect the differences between the old and new cache entries.
 1832    * Either of the old or new values can be <code>null</code>
 1833    * or contain a <code>null</code> group list, in which case the entry's
 1834    * groups will all be added or removed respectively.
 1835    *
 1836    * @param oldValue The old CacheEntry that is being replaced.
 1837    * @param newValue The new CacheEntry that is being inserted.
 1838    */
 1839  12366 private void updateGroups(CacheEntry oldValue, CacheEntry newValue, boolean persist) {
 1840  12366 Set oldGroups = null;
 1841  12366 Set newGroups = null;
 1842   
 1843  12366 if (oldValue != null) {
 1844  4106 oldGroups = oldValue.getGroups();
 1845    }
 1846   
 1847  12366 if (newValue != null) {
 1848  12366 newGroups = newValue.getGroups();
 1849    }
 1850   
 1851    // Get the names of the groups to remove
 1852  12366 if (oldGroups != null) {
 1853  80 Set removeFromGroups = new HashSet();
 1854   
 1855  80 for (Iterator it = oldGroups.iterator(); it.hasNext();) {
 1856  134 String groupName = (String) it.next();
 1857   
 1858  134 if ((newGroups == null) || !newGroups.contains(groupName)) {
 1859    // We need to remove this group
 1860  24 removeFromGroups.add(groupName);
 1861    }
 1862    }
 1863   
 1864  80 removeGroupMappings(oldValue.getKey(), removeFromGroups, persist);
 1865    }
 1866   
 1867    // Get the names of the groups to add
 1868  12366 if (newGroups != null) {
 1869  152 Set addToGroups = new HashSet();
 1870   
 1871  152 for (Iterator it = newGroups.iterator(); it.hasNext();) {
 1872  252 String groupName = (String) it.next();
 1873   
 1874  252 if ((oldGroups == null) || !oldGroups.contains(groupName)) {
 1875    // We need to add this group
 1876  142 addToGroups.add(groupName);
 1877    }
 1878    }
 1879   
 1880  152 addGroupMappings(newValue.getKey(), addToGroups, persist);
 1881    }
 1882    }
 1883   
 1884    /**
 1885    * AbstractConcurrentReadCache collision list entry.
 1886    */
 1887    protected static class Entry implements Map.Entry {
 1888    protected final Entry next;
 1889    protected final Object key;
 1890   
 1891    /*
 1892    The use of volatile for value field ensures that
 1893    we can detect status changes without synchronization.
 1894    The other fields are never changed, and are
 1895    marked as final.
 1896    */
 1897    protected final int hash;
 1898    protected volatile Object value;
 1899   
 1900  11486 Entry(int hash, Object key, Object value, Entry next) {
 1901  11486 this.hash = hash;
 1902  11486 this.key = key;
 1903  11486 this.next = next;
 1904  11486 this.value = value;
 1905    }
 1906   
 1907    // Map.Entry Ops
 1908  0 public Object getKey() {
 1909  0 return key;
 1910    }
 1911   
 1912    /**
 1913    * Set the value of this entry.
 1914    * Note: In an entrySet or
 1915    * entrySet.iterator), unless the set or iterator is used under
 1916    * synchronization of the table as a whole (or you can otherwise
 1917    * guarantee lack of concurrent modification), <tt>setValue</tt>
 1918    * is not strictly guaranteed to actually replace the value field
 1919    * obtained via the <tt>get</tt> operation of the underlying hash
 1920    * table in multithreaded applications. If iterator-wide
 1921    * synchronization is not used, and any other concurrent
 1922    * <tt>put</tt> or <tt>remove</tt> operations occur, sometimes
 1923    * even to <em>other</em> entries, then this change is not
 1924    * guaranteed to be reflected in the hash table. (It might, or it
 1925    * might not. There are no assurances either way.)
 1926    *
 1927    * @param value the new value.
 1928    * @return the previous value, or null if entry has been detectably
 1929    * removed.
 1930    * @exception NullPointerException if the value is <code>null</code>.
 1931    *
 1932    **/
 1933  0 public Object setValue(Object value) {
 1934  0 if (value == null) {
 1935  0 throw new NullPointerException();
 1936    }
 1937   
 1938  0 Object oldValue = this.value;
 1939  0 this.value = value;
 1940   
 1941  0 return oldValue;
 1942    }
 1943   
 1944    /**
 1945    * Get the value.
 1946    * Note: In an entrySet or entrySet.iterator,
 1947    * unless the set or iterator is used under synchronization of the
 1948    * table as a whole (or you can otherwise guarantee lack of
 1949    * concurrent modification), <tt>getValue</tt> <em>might</em>
 1950    * return null, reflecting the fact that the entry has been
 1951    * concurrently removed. However, there are no assurances that
 1952    * concurrent removals will be reflected using this method.
 1953    *
 1954    * @return the current value, or null if the entry has been
 1955    * detectably removed.
 1956    **/
 1957  0 public Object getValue() {
 1958  0 return value;
 1959    }
 1960   
 1961  0 public boolean equals(Object o) {
 1962  0 if (!(o instanceof Map.Entry)) {
 1963  0 return false;
 1964    }
 1965   
 1966  0 Map.Entry e = (Map.Entry) o;
 1967   
 1968  0 if (!key.equals(e.getKey())) {
 1969  0 return false;
 1970    }
 1971   
 1972  0 Object v = value;
 1973   
 1974  0 return (v == null) ? (e.getValue() == null) : v.equals(e.getValue());
 1975    }
 1976   
 1977  0 public int hashCode() {
 1978  0 Object v = value;
 1979   
 1980  0 return hash ^ ((v == null) ? 0 : v.hashCode());
 1981    }
 1982   
 1983  0 public String toString() {
 1984  0 return key + "=" + value;
 1985    }
 1986   
 1987  0 protected Object clone() {
 1988  0 return new Entry(hash, key, value, ((next == null) ? null : (Entry) next.clone()));
 1989    }
 1990    }
 1991   
    /**
     * Base iterator over the entries of the enclosing cache's hash table.
     * <p>
     * Works on a snapshot of the table array taken at construction time
     * (via <code>getTableForReading()</code>), so it never throws
     * <code>ConcurrentModificationException</code>; entries added or removed
     * after construction may or may not be reflected. Implements both
     * <code>Iterator</code> and <code>Enumeration</code> so it can back either
     * API. Subclasses choose what <code>next()</code> yields by overriding
     * {@link #returnValueOfNext}.
     * <p>
     * NOTE(review): this is a non-static inner class — it needs the enclosing
     * cache instance for the table snapshot and for <code>remove()</code>.
     */
    protected class HashIterator implements Iterator, Enumeration {
        protected final Entry[] tab; // snapshot of table
        protected Entry entry = null; // current node of slot
        protected Entry lastReturned = null; // last node returned by next
        protected Object currentKey; // key for current node
        protected Object currentValue; // value for current node
        protected int index; // current slot

        protected HashIterator() {
            tab = AbstractConcurrentReadCache.this.getTableForReading();
            // Slots are scanned from the highest index down to 0.
            index = tab.length - 1;
        }

        public boolean hasMoreElements() {
            return hasNext();
        }

        public boolean hasNext() {
            /*
            currentkey and currentValue are set here to ensure that next()
            returns normally if hasNext() returns true. This avoids
            surprises especially when final element is removed during
            traversal -- instead, we just ignore the removal during
            current traversal.
            */
            for (;;) {
                if (entry != null) {
                    // Volatile read: a null value means the entry was
                    // concurrently removed, so skip to the next node.
                    Object v = entry.value;

                    if (v != null) {
                        currentKey = entry.key;
                        currentValue = v;

                        return true;
                    } else {
                        entry = entry.next;
                    }
                }

                // Current collision list exhausted: advance to the next
                // non-empty slot (scanning downward through the snapshot).
                while ((entry == null) && (index >= 0)) {
                    entry = tab[index--];
                }

                if (entry == null) {
                    // Whole table scanned — nothing left.
                    currentKey = currentValue = null;

                    return false;
                }
            }
        }

        public Object next() {
            // hasNext() pre-captures currentKey/currentValue; only re-probe
            // if that capture has not happened (or was consumed) yet.
            if ((currentKey == null) && !hasNext()) {
                throw new NoSuchElementException();
            }

            Object result = returnValueOfNext();
            lastReturned = entry;
            // Clear the captured pair so the next hasNext() re-probes.
            currentKey = currentValue = null;
            entry = entry.next;

            return result;
        }

        public Object nextElement() {
            return next();
        }

        /**
         * Removes (from the live cache, not just the snapshot) the entry most
         * recently returned by {@link #next}.
         *
         * @throws IllegalStateException if next() has not been called yet.
         */
        public void remove() {
            if (lastReturned == null) {
                throw new IllegalStateException();
            }

            AbstractConcurrentReadCache.this.remove(lastReturned.key);
        }

        /**
         * Hook for subclasses: the object next() should hand back for the
         * entry captured by hasNext(). The base class returns the Entry node
         * itself; Key/ValueIterator return the captured key/value instead.
         */
        protected Object returnValueOfNext() {
            return entry;
        }
    }
 2072   
 2073    protected class KeyIterator extends HashIterator {
 2074  72 protected Object returnValueOfNext() {
 2075  72 return currentKey;
 2076    }
 2077    }
 2078   
 2079    protected class ValueIterator extends HashIterator {
 2080  0 protected Object returnValueOfNext() {
 2081  0 return currentValue;
 2082    }
 2083    }
 2084    }