/*   Open Source Java Caching Service
 *    Copyright (C) 2002 Frank Karlstrøm
 *    This library is free software; you can redistribute it and/or
 *    modify it under the terms of the GNU Lesser General Public
 *    License as published by the Free Software Foundation; either
 *    version 2.1 of the License, or (at your option) any later version.
 *
 *    This library is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *    Lesser General Public License for more details.
 *
 *    You should have received a copy of the GNU Lesser General Public
 *    License along with this library; if not, write to the Free Software
 *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *    The author can be contacted by email: fjankk@users.sourceforge.net
 */
package org.fjank.jcache.persistence;

import java.io.File;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Iterator;
import javax.util.jcache.CacheAttributes;
import javax.util.jcache.DiskCacheException;
import org.fjank.jcache.CacheObject;
import org.fjank.jcache.DiskCacheObject;
import EDU.oswego.cs.dl.util.concurrent.ReadWriteLock;
import EDU.oswego.cs.dl.util.concurrent.WriterPreferenceReadWriteLock;

/**
 * Class implementing a simple disk persistence solution.
 */
public class DiskCache implements Serializable {
    /**
     * Indicates whether the cache is 'alive', defined as having been
     * initialized, but not yet disposed.
     */
    private boolean alive;

    /** the name of the cache */
    private final String cacheName;

    /** the cache attributes, used to determine the maximum disk size. */
    private final CacheAttributes cattr;

    /** the adapter which contains the actual data. */
    private transient CacheFileAdapter dataFile;

    /** the filename */
    private final String fileName;

    /** the adapter which contains the keys and positions */
    private transient CacheFileAdapter keyFile;

    /** a map of the keys */
    private HashMap keyHash;

    /** the root directory for the cache files. */
    private final File rafDir;

    /**
     * Each instance of a disk cache should use this lock to synchronize reads
     * and writes to the underlying storage mechanism.
     */
    private final ReadWriteLock storageLock = new WriterPreferenceReadWriteLock();

    /**
     * The current size of the disk cache, in bytes.
     */
    private int currentSize;

    /**
     * Creates a new DiskCache object.
     *
     * @param attributes the attributes for this disk cache.
     *
     * @throws DiskCacheException if any exceptions occur.
     */
    public DiskCache(final CacheAttributes attributes) throws DiskCacheException {
        this.cacheName = "Fjanks FKache";
        String rootDirName = attributes.getDiskPath();
        this.cattr = attributes;
        this.fileName = cacheName;
        rafDir = new File(rootDirName);
        rafDir.mkdirs();
        dataFile = new CacheFileAdapter(new File(rafDir, fileName + ".data"));
        keyFile = new CacheFileAdapter(new File(rafDir, fileName + ".key"));
        if (keyFile.length() > 0) {
            // a key file already exists: load the keys, and clear the data
            // file if no keys could be read from it.
            loadKeysFromFile();
            if (keyHash.size() == 0) {
                dataFile.reset();
            }
        } else {
            // no key file: start with an empty key map and clear any orphaned data file.
            keyHash = new HashMap();
            if (dataFile.length() > 0) {
                dataFile.reset();
            }
        }
        alive = true;
    }

    /**
     * Gets an object from the disk cache.
     *
     * @param key the key for the object
     *
     * @return an object from the disk cache, or <code>null</code> if the cache is not alive
     *
     * @throws DiskCacheException if exceptions occur.
     */
    private CacheObject doGet(Serializable key) throws DiskCacheException {
        try {
            storageLock.readLock().acquire();
        } catch (InterruptedException e) {
            throw new DiskCacheException("The read was interrupted.");
        }
        try {
            if (!alive) {
                return null;
            }
            return readElement(key);
        } finally {
            storageLock.readLock().release();
        }
    }

    /**
     * Updates the disk cache. If an entry already exists for the key and the
     * new data fits in its old slot, the slot is reused; otherwise the data is
     * written at the end of the data file.
     *
     * @param key the key of the object to write.
     * @param data the serialized object to write.
     */
    private void doUpdate(Object key, byte[] data) {
        DiskElementDescriptor ded = new DiskElementDescriptor();
        ded.init(dataFile.length(), data);
        try {
            storageLock.writeLock().acquire();
        } catch (InterruptedException e) {
            // interrupted while waiting for the write lock; the update is skipped.
            return;
        }
        try {
            if (!alive) {
                return;
            }
            DiskElementDescriptor old =
                (DiskElementDescriptor) keyHash.put(key, ded);
            if ((old != null) && (ded.len <= old.len)) {
                // the new data fits in the old slot, so reuse its position.
                ded.pos = old.pos;
            }
            dataFile.write(data, ded.pos);
        } finally {
            storageLock.writeLock().release();
        }
    }

    /**
     * Gets an object from the disk cache.
     *
     * @param key the key for the object
     *
     * @return an object from the disk cache, or <code>null</code> if the cache is not alive
     *
     * @throws DiskCacheException if exceptions occur.
     */
    public final CacheObject getObject(final Serializable key)
        throws DiskCacheException {
        if (!alive) {
            return null;
        }
        return doGet(key);
    }

    /**
     * Loads the keys from an existing cache file.
     *
     * @throws DiskCacheException if exceptions occur.
     */
    private void loadKeysFromFile() throws DiskCacheException {
        try {
            storageLock.readLock().acquire();
        } catch (InterruptedException e) {
            // interrupted while waiting for the read lock; fall back to an empty key map.
            keyHash = new HashMap();
            return;
        }
        try {
            keyHash = (HashMap) keyFile.readObject(0);
            if (keyHash == null) {
                keyHash = new HashMap();
            }
        } finally {
            storageLock.readLock().release();
        }
    }

    /**
     * Closes the disk cache, and optimizes the disk files.
     */
    public void close() {
        try {
            storageLock.writeLock().acquire();
        } catch (InterruptedException e) {
            // interrupted while waiting for the write lock; mark the cache as
            // disposed without touching the files.
            alive = false;
            return;
        }
        try {
            if (!alive) {
                return;
            }
            optimizeFile();
            dataFile.close();
            dataFile = null;
            keyFile.close();
            keyFile = null;
        } catch (DiskCacheException e) {
            // ignored: the cache is shut down and useless anyway; the user will
            // get the error when the cache is opened the next time.
        } finally {
            alive = false;
            storageLock.writeLock().release();
        }
    }

    /**
     * Defragments the cache files by copying all live entries into a fresh
     * data file and replacing the old one.
     *
     * @throws DiskCacheException if exceptions occur.
     *
     * @todo optimize this to let it be done online.
     */
    private void optimizeFile() throws DiskCacheException {
        // copy every entry into a temporary data file, recording the new positions.
        HashMap keyHashTemp = new HashMap();
        CacheFileAdapter dataFileTemp =
            new CacheFileAdapter(new File(rafDir, fileName + "Temp.data"));
        Iterator itr = keyHash.keySet().iterator();
        while (itr.hasNext()) {
            Serializable key = (Serializable) itr.next();
            CacheObject tempDe = readElement(key);
            DiskElementDescriptor de = dataFileTemp.appendObject(tempDe);
            keyHashTemp.put(key, de);
        }
        dataFileTemp.close();
        dataFile.close();
        // replace the old data file with the compacted one.
        File oldData = new File(rafDir, fileName + ".data");
        if (oldData.exists()) {
            oldData.delete();
        }
        File newData = new File(rafDir, fileName + "Temp.data");
        File newFileName = new File(rafDir, fileName + ".data");
        if (newData.exists()) {
            newData.renameTo(newFileName);
        }
        // install and persist the new key map.
        keyHash = keyHashTemp;
        keyFile.reset();
        if (keyHash.size() > 0) {
            keyFile.writeObject(keyHash, 0);
        }
    }

    /**
     * Reads an element from the disk cache.
     *
     * @param key the key for the disk object
     *
     * @return an element from the disk cache
     *
     * @throws DiskCacheException if exceptions occur.
     */
    CacheObject readElement(final Serializable key)
        throws DiskCacheException {
        DiskElementDescriptor ded = (DiskElementDescriptor) keyHash.get(key);
        if (ded != null) {
            Serializable readObject = dataFile.readObject(ded.pos);
            return ((DiskCacheObject) readObject).getCacheObject();
        }
        throw new DiskCacheException("The object " + key
            + " was not found in the diskCache.");
    }

    /**
     * Adds the provided element to the cache.
     *
     * @param cacheElement the element to add.
     *
     * @return <code>true</code> if the element was written to disk,
     *         <code>false</code> if it would exceed the maximum disk size.
     */
    public final boolean update(final CacheObject cacheElement) {
        byte[] data = CacheFileAdapter.serialize(new DiskCacheObject(cacheElement));
        int newSize = currentSize + data.length;
        int maxSize = cattr.getDiskSize() * 1024 * 1024;
        if (newSize > maxSize) {
            return false;
        }
        doUpdate(cacheElement.getKey(), data);
        currentSize = newSize;
        return true;
    }

    /**
     * Will remove all objects in this disk cache. Just a quick and dirty
     * implementation to make things work.
     *
     * @throws DiskCacheException if the removal was not successful.
     *
     * @todo make this operation asynchronous to speed up flushing.
     */
    public void removeAll() throws DiskCacheException {
        try {
            storageLock.writeLock().acquire();
        } catch (InterruptedException e) {
            // interrupted while waiting for the write lock; nothing is removed.
            return;
        }
        try {
            if (dataFile != null) {
                dataFile.reset();
            }
            if (keyFile != null) {
                keyFile.reset();
            }
            currentSize = 0;
        } finally {
            storageLock.writeLock().release();
        }
    }
}
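
/*
 * Illustrative usage sketch, not part of the original source. It shows only the
 * intended call sequence of this class: the CacheAttributes and CacheObject
 * instances are placeholders obtained from the surrounding cache implementation,
 * and the attributes are assumed to supply a disk path and a maximum disk size.
 *
 *     CacheAttributes attributes = ...;   // configured elsewhere
 *     DiskCache diskCache = new DiskCache(attributes);
 *     CacheObject element = ...;          // an entry held by the in-memory cache
 *     if (diskCache.update(element)) {
 *         CacheObject fromDisk = diskCache.getObject((Serializable) element.getKey());
 *     }
 *     diskCache.close();
 */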