Repository: jankotek/JDBM3 Branch: master Commit: f50c92afe8ff Files: 112 Total size: 833.1 KB Directory structure: gitextract_fufew3er/ ├── LICENSE-2.0.html ├── README.md ├── pom.xml └── src/ ├── main/ │ └── java/ │ └── org/ │ └── apache/ │ └── jdbm/ │ ├── BTree.java │ ├── BTreeLazyRecord.java │ ├── BTreeMap.java │ ├── BTreeNode.java │ ├── BTreeSet.java │ ├── DB.java │ ├── DBAbstract.java │ ├── DBCache.java │ ├── DBCacheMRU.java │ ├── DBCacheRef.java │ ├── DBMaker.java │ ├── DBStore.java │ ├── DataInputOutput.java │ ├── DataInputOutput2.java │ ├── HTree.java │ ├── HTreeBucket.java │ ├── HTreeDirectory.java │ ├── HTreeSet.java │ ├── LinkedList2.java │ ├── LogicalRowIdManager.java │ ├── LongHashMap.java │ ├── LongPacker.java │ ├── Magic.java │ ├── ObjectInputStream2.java │ ├── ObjectOutputStream2.java │ ├── PageFile.java │ ├── PageIo.java │ ├── PageManager.java │ ├── PageTransactionManager.java │ ├── PhysicalFreeRowIdManager.java │ ├── PhysicalRowIdManager.java │ ├── RecordHeader.java │ ├── RecordListener.java │ ├── SerialClassInfo.java │ ├── Serialization.java │ ├── SerializationHeader.java │ ├── Serializer.java │ ├── Storage.java │ ├── StorageDisk.java │ ├── StorageDiskMapped.java │ ├── StorageMemory.java │ ├── StorageZip.java │ ├── Utils.java │ └── packageXX.html └── test/ └── java/ └── org/ └── apache/ └── jdbm/ ├── BTreeBench.java ├── BTreeKeyCompressionTest.java ├── BTreeLeadingValuePackTest.java ├── BTreeMapNavigable2Test.java ├── BTreeMapNavigableSubMapExclusiveTest.java ├── BTreeMapNavigableSubMapInclusiveTest.java ├── BTreeMapNavigableTest.java ├── BTreeMapTest.java ├── BTreeNodeTest.java ├── BTreeSetTest.java ├── BTreeTest.java ├── ByteArrayComparator.java ├── CompactTest.java ├── ConcurrentBTreeReadTest.java ├── ConcurrentMapInterfaceTest.java ├── DBCacheMRUTest.java ├── DBCacheTest.java ├── DBMakerTest.java ├── DBTest.java ├── DataInputOutputTest.java ├── DefragTest.java ├── FileHeaderTest.java ├── FileLockTest.java ├── HTreeBucketTest.java ├── HTreeDirectoryTest.java ├── HTreeMapTest.java ├── HTreeSetTest.java ├── HTreeTest.java ├── LinkedList2Test.java ├── LogicalRowIdManagerTest.java ├── LongHashMapTest.java ├── LongTreeMap.java ├── MapInterfaceTest.java ├── ObjectOutputStream2Test.java ├── PageFileTest.java ├── PageIoTest.java ├── PageManagerTest.java ├── PageTransactionManagerTest.java ├── PhysicalFreeRowIdManagerTest.java ├── PhysicalRowIdManagerTest.java ├── RecordHeaderTest.java ├── RollbackTest.java ├── SerialClassInfoTest.java ├── Serialization2Bean.java ├── Serialization2Test.java ├── SerializationHeaderTest.java ├── SerializationTest.java ├── Serialized2DerivedBean.java ├── StorageDiskMappedTest.java ├── StorageZipTest.java ├── StreamCorrupted.java ├── TestCaseWithTestFile.java ├── TestInsertPerf.java ├── TestInsertUpdate.java ├── TestIssues.java ├── TestLargeData.java ├── TestLazyRecordsInTree.java ├── TestRollback.java ├── TestStress.java ├── UtilTT.java ├── UtilsTest.java └── junk/ ├── HugeData.java ├── MappedBufferGrow.java ├── MappedBufferVersusRaf.java └── RandomInsertLongs.java ================================================ FILE CONTENTS ================================================ ================================================ FILE: LICENSE-2.0.html ================================================ Apache License, Version 2.0 - The Apache Software Foundation

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:

  1. You must give any other recipients of the Work or Derivative Works a copy of this License; and

  2. You must cause any modified files to carry prominent notices stating that You changed the files; and

  3. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and

  4. If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

================================================ FILE: README.md ================================================

**NOTE: this project is in maintenance mode (bug fixes only); I have redirected my effort to JDBM4, which should provide better concurrent scalability.**

JDBM provides TreeMap, HashMap and other collections backed by disk storage. Now you can handle billions of items without ever running out of memory. JDBM is probably the fastest and simplest pure Java database.

JDBM is tiny (160KB no-deps jar), but packed with features such as transactions, an instance cache and space-efficient serialization. It also has outstanding performance: 1 million inserts per second and 10 million fetches per second (disk based!). It is tightly optimized and has minimal overhead. It scales well from an Android phone to multi-terabyte data sets.

JDBM is open source and free-as-in-beer under the Apache license. There is no catch and no strings attached.

News
====

4th Sep 2012 - JDBM3 alpha4 was released. [Just bugfixes](https://groups.google.com/forum/?fromgroups=#!topic/jdbm/yBB4dLW54Pk)

18th Aug 2012 - First version of JDBM4 is available on [GitHub](https://github.com/jankotek/JDBM4)

30th Apr 2012 - JDBM3 [may soon become part of Apache Foundation](https://groups.google.com/forum/?fromgroups#!topic/jdbm/pb4LWr6qTxM). This will not affect the github site, but the package may be renamed in a few days (done).

10th Apr 2012 - Alpha3 was just released. Get the [binary jar](https://github.com/downloads/jankotek/JDBM3/JDBM-3.0-alpha3.jar) and [read some notes](http://groups.google.com/group/jdbm/browse_thread/thread/db1f0ed52ce5fb3c)

24th Feb 2012 - Alpha2 released with tons of bugfixes. Get the [binary jar](https://github.com/downloads/jankotek/JDBM3/JDBM-3.0-alpha2.jar)

18th Jan 2012 - Alpha1 released, [announcement](http://kotek.net/blog/jdbm_3.0_alpha_1_released) and [binary jar](https://github.com/downloads/jankotek/JDBM3/JDBM-3.0-alpha-1.jar)

Features
========

* B*Tree with `ConcurrentNavigableMap` interface
    * Very fast for sequential read/write.
    * Small values stored inside tree nodes, large values lazily fetched.
    * Self-balancing, great performance even with 1e12 items.
    * Delta compression on keys
    * Submaps (aka cursors) to view limited collection subsets
    * Custom comparators
* H*Tree with `ConcurrentMap` interface
    * Optimized for random reads/writes
    * Small values stored inside tree nodes, large values lazily fetched.
    * Self-balancing, great performance even with 1e12 items.
* TreeSet and HashSet, which use BTree and HTree without values
* LinkedList, which implements a bounded BlockingDeque (not implemented yet)
* Multi-core scalability (currently under testing)
    * Everything is thread safe
    * Reads should scale linearly with the number of cores (as long as data fits into the cache)
    * All collections implement `Concurrent` interfaces
    * Some multi-core scalability with `ReentrantReadWriteLock`.
* Instance cache
    * If data fits into the cache, reads are almost as fast as in-memory collections.
    * Minimal overhead, works well even with a 16MB heap.
    * Scales well into 64GB RAM heaps.
    * Various yet simple tuning options
* Transactions
    * Single transaction per store avoids concurrent-modification issues
    * Transactions are ACID (with limits, since there is only a single concurrent transaction)
    * Option to disable transactions for fast inserts/updates
* Low level key-value store
    * Various options for on-disk store (NIO, RAF, locking...)
    * Write performance not affected by store fragmentation
    * In-memory store option
    * Can read data from a zip file with reasonable performance
    * Can read data from a classpath resource, so a database is deployable over Java Web Start
    * Advanced defragmentation
    * Print store statistics
    * Transparent data encryption
    * Only 9 bytes overhead per record (for example a BTree node)
* Space efficient serialization
    * Custom code for most `java.util` and `java.lang` classes. For example Long(0) takes only a single byte
    * Very small POJO serialization overhead, typically only 3 bytes per class + 1 byte for each field.
    * Mimics java serialization: fields can be `transient`, all classes need to implement the `Serializable` interface
    * Supports `Externalizable`
    * Possible to plug in your own `Serializer`
* Performance
    * Blazing fast: 1 million inserts / 10 million reads per second (on my 5GHz machine, but you should get 300000 inserts p.s. easily)
    * Tightly optimized code
    * Uses NIO stuff you read about, but never see in action.
    * Minimal heap usage, prevents `java.lang.OutOfMemoryError: GC overhead limit`
    * Most logic done with primitives or arrays. Minimal stack usage.

Introduction
============

All classes are contained in the package `org.apache.jdbm`. There are only two important classes: `DBMaker` is a builder which configures and opens a database; `DB` is the database itself, it opens collections and controls transactions.

Collections in JDBM mimic their `java.util` counterparts. TreeMap uses an on-disk ordered auto-balanced B*Tree index, LinkedList is stored as self-referencing entries, and so on. Everything should be thread safe (currently under testing).

Maven Dependency
----------------

JDBM is not currently in any Maven repository. TODO: We should soon have a custom repo with nightly builds.

Quick example
-------------

    import org.apache.jdbm.*;

    //Open database using builder pattern.
    //All options are available with code autocompletion.
    DB db = DBMaker.openFile("test")
            .deleteFilesAfterClose()
            .enableEncryption("password", false)
            .make();

    //open a collection, TreeMap has better performance than HashMap
    SortedMap map = db.createTreeMap("collectionName");

    map.put(1, "one");
    map.put(2, "two");
    //map.keySet() is now [1,2] even before commit

    db.commit();  //persist changes into disk

    map.put(3, "three");
    //map.keySet() is now [1,2,3]

    db.rollback(); //revert recent changes
    //map.keySet() is now [1,2]

    db.close();

A few quick tricks
------------------

* Disabling transactions increases write performance 6x. Do it with `DBMaker.disableTransactions()`. Do not forget to close the store correctly in this case!
* When transactions are enabled, all uncommitted instances are stored in memory. Make sure you commit on time; this is the most common cause of `OutOfMemoryError`.
* JDBM does not try to reclaim unused space after a massive delete, you must call `DB.defrag(false)` yourself.
* TreeMap usually has better performance than HashMap.
* JDBM uses an instance cache with a limited size by default. If you have enough memory and a large store, use the unbounded cache: `DBMaker.enableHardCache()`
* JDBM is optimized for small records. Sizes: 16 bytes is recommended, 32KB is a reasonable maximum, 8MB is the hard limit.
* JDBM scales well up to 1e12 records. A batch insert running overnight creates a multi-terabyte store.

The sketch below combines several of these options.
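For illustration, here is a minimal sketch of a bulk-load configuration using the options mentioned above. It only uses methods named elsewhere in this README (`openFile`, `disableTransactions`, `enableHardCache`, `make`, `createTreeMap`, `close`); the store name `"bulkload"` and the inserted data are made up, and the exact set of builder methods should be verified against the `DBMaker` javadoc for your version.

    import java.util.SortedMap;
    import org.apache.jdbm.*;

    // Bulk load: transactions disabled for ~6x faster writes, hard cache
    // enabled because the heap is large. With transactions off there is
    // no crash protection, so DB.close() at the end is mandatory.
    DB db = DBMaker.openFile("bulkload")
            .disableTransactions()
            .enableHardCache()
            .make();

    SortedMap data = db.createTreeMap("data");
    for (int i = 0; i < 1000000; i++) {
        data.put(i, "value" + i);
    }

    db.close(); // flushes the write buffer; never skip this with transactions disabled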
DBMaker
-------

TODO

DB
--

TODO

Collections
-----------

TODO

Instance cache
--------------

JDBM caches created instances in a similar way to Hibernate and other ORM frameworks. This greatly reduces serialization overhead and speeds up the database. There are five cache types, each configurable with a method on the `DBMaker` builder:

* **Most Recently Used** (MRU) cache. It has a fixed size and stores the newest entries. This cache is on by default. You can configure its size; the default is 2048. This cache has the lowest GC overhead and may be surprisingly faster than the other cache types.
* **No cache**. You may disable the instance cache by using `DBMaker.disableCache()`
* **Hard reference cache**. All instances fetched by JDBM are stored in the cache until released. Good with large memory heaps. The `hard` cache is recommended over `soft` and `weak` as it has smaller overhead. Use `DBMaker.enableHardCache()` to enable it.
* **Weak reference cache**. Instances are referenced using a `WeakReference`. When an item is no longer referenced by other instances, it can be discarded by the GC. Use `DBMaker.enableWeakCache()` to enable it.
* **Soft reference cache**. Instances are referenced using a `SoftReference`. Similar to `WeakReference`, but held longer, until the system starts running out of memory. Use `DBMaker.enableSoftCache()` to enable it.

With the weak/soft/hard cache JDBM starts a background cleanup thread. It also checks memory usage every 10 seconds; if free memory is below 25%, it clears the cache. Our tests show that GC alone is not fast enough to prevent `OutOfMemoryError`. This may be disabled with `DBMaker.disableCacheAutoClear()`. You may clear the cache manually using `DB.clearCache()`. This is useful after a massive delete, or when you are moving from one type of data to another.
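As a quick illustration, the sketch below shows how these cache options fit together. It only uses methods named in this section (`enableSoftCache`, `disableCacheAutoClear`, `DB.clearCache`); treat it as a sketch rather than a definitive recipe, and check `DBMaker` for the exact methods available in your version.

    import org.apache.jdbm.*;

    // Read-mostly store with a soft-reference cache: hot entries stay cached,
    // but the GC may reclaim them under memory pressure.
    DB db = DBMaker.openFile("catalog")
            .enableSoftCache()
            //.disableCacheAutoClear() // keep the cache even when free heap drops below 25%
            .make();

    // ... reads and writes ...

    db.clearCache(); // e.g. after a massive delete, before switching to a different working set
    db.commit();
    db.close();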
Transactions
------------

JDBM supports a single transaction per store. It does not have multiple concurrent transactions with row/table locks, pessimistic locking and similar machinery. This trade-off greatly simplifies the design and speeds up operations. Transactions are still ACID, but in a limited way.

The transaction implementation is sound and solid. Uncommitted data are stored in memory and, during commit, appended to the end of a transaction log file. This is safe, as an append operation hardly ever corrupts a file. After the commit is finished, data are replayed from the transaction log file into the main storage file. If the user calls rollback, the transaction log file is discarded.

Keeping a transaction log file brings some overhead. It is possible to disable transactions and write changes directly into the main storage file, which makes inserts and updates about 6x faster. In this case no effort is made to protect the file from corruption; everything is sacrificed for maximal speed, and it is absolutely necessary to properly close the storage before exit. You may disable transactions by using `DBMaker.disableTransactions()`.

Uncommitted instances are stored in memory and flushed to disk during commit, so with large transactions you may run out of memory easily. With disabled transactions, data are stored in a 10 MB memory buffer and flushed to the main storage file when the buffer is filled.

Serialization
-------------

JDBM has its own space-efficient serialization which tries to mimic the standard implementation. All classes must implement the `Serializable` interface. You may exclude a field from serialization with the `transient` keyword. Our serialization also handles cyclic references and some other advanced cases. You may use your own binary format via the `Externalizable` interface or a custom `Serializer`.

JDBM has custom serialization code for most classes in the `java.lang` and `java.util` packages. For a `Date` JDBM writes only 9 bytes: a 1-byte serialization header and an 8-byte timestamp. For `true`, `String("")` or `Long(3)` JDBM writes only a single-byte serialization header. For an array list and other collections JDBM writes a serialization header, a packed size and the data.

Custom serializers have maximal space efficiency and low overhead. Standard Java serialization stores class structure data (field names, types...) with each record's data. This generates huge overhead which multiplies with the number of records. JDBM serialization stores class structure data in a single place and record data only contain a reference, so the space overhead for POJOs is typically only 3 bytes per class + 1 byte for each field.

Our serialization is designed to be very fast on small chunks of data (a few POJOs glued together). With a couple of thousand nodes in the object tree it becomes slow (N^2 scalability). The maximal record size in JDBM is 8 MB, so it is good practice to store only small keys/values in the database. You should always use the filesystem for data larger than 500KB.
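To illustrate the custom `Serializer` hook mentioned above, here is a minimal sketch. The two-method shape (`serialize(DataOutput, Object)` / `deserialize(DataInput)`) matches the anonymous serializers visible elsewhere in this codebase, but this extract strips generic type parameters, so the exact interface declaration may differ. The `Point` class and `PointSerializer` are hypothetical, and how a serializer is attached to a particular collection (e.g. a `createTreeMap` overload taking key/value serializers) should be checked against the `DB` javadoc.

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import org.apache.jdbm.Serializer;

    // Hypothetical value type stored as two ints = 8 bytes per record,
    // with no per-record class metadata.
    class Point implements java.io.Serializable {
        final int x, y;
        Point(int x, int y) { this.x = x; this.y = y; }
    }

    // Writes/reads a Point directly, bypassing the generic POJO serializer.
    class PointSerializer implements Serializer {
        public void serialize(DataOutput out, Object obj) throws IOException {
            Point p = (Point) obj;
            out.writeInt(p.x);
            out.writeInt(p.y);
        }
        public Object deserialize(DataInput in) throws IOException, ClassNotFoundException {
            return new Point(in.readInt(), in.readInt());
        }
    }

Internally, `BTree.createInstance` accepts separate key and value serializers, so a serializer like this would typically be supplied when the collection is created.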
Defragmentation
---------------

The store gets fragmented over time. JDBM is designed so that this does not slow down write/update/delete operations, but fragmentation does slow down read operations, as more data needs to be read from disk. JDBM does not do any sort of magic to reclaim unused space; it relies on the user to call `DB.defrag` periodically or after massive updates/deletes/inserts. Defrag can be called at runtime, but the `DB.defrag` method blocks other reads/writes until it finishes. Defrag basically copies data from one store into a second store, then deletes the first store and renames the second. Defragmentation has two modes, controlled by the `DB.defrag(boolean fullDefrag)` parameter:

**Quick defrag** is designed to be as fast as possible. It only reclaims unused space (compacts the store) but does not reorganize data inside the store. It copies all data from one store to the other, without empty spaces between records. It is very fast, limited only by sequential disk write speed. Call it with `DB.defrag(false)`.

**Full defrag** is designed to make the store as fast as possible. It reorganizes the data layout so that nodes from a single collection are stored close to each other. This makes future reads from the store faster, as less data needs to be read. Full defrag is much slower than quick defrag, as it traverses and copies all collections non-sequentially. Call it with `DB.defrag(true)`.

Troubleshooting
===============

Please report bugs to the Github issue tracker. There is a [mail-group](mailto:jdbm@googlegroups.com) if you have questions; you may also browse the [group archive](http://groups.google.com/group/jdbm).

JDBM uses chained exceptions so that the user does not have to write try/catch blocks; an IOException is usually wrapped in an IOError, which is unchecked. So please always check the first exception in the chain.

**OutOfMemoryError** JDBM keeps uncommitted data in memory, so you may need to commit more often. If your memory is limited, use the MRU cache (on by default). You may increase the heap size by starting the JVM with an extra parameter such as `-Xmx500m`.

**OutOfMemoryError: GC overhead limit exceeded** Your app is creating new object instances faster than the GC can collect them. If you are using the soft/weak cache, switch to the hard cache to reduce GC overhead (it is auto-cleared when free memory is low). There is also a JVM parameter to disable this assertion.

**File locking, OverlappingFileLockException, some IOError** You are trying to open a file already opened by another JDBM instance. Make sure that you `DB.close()` the store correctly; the operating system may leave the lock in place after the JVM is terminated. You may try `DBMaker.useRandomAccessFile()`, which is slower but does not use such aggressive locking. In read-only mode you can also open a store multiple times. You may also disable file locks completely with `DB.disableFileLock()` (at your own risk, of course).

**InternalError, Error, AssertionFailedError, IllegalArgumentException, StackOverflowError and so on** There was a problem in JDBM. It is possible that the file store was corrupted by an internal error or disk failure. Disabling the cache with `DBMaker.disableCache()` may work around the problem. Please submit a bug report to github.

---

Special thanks to EJ-Technologies for donating us an excellent [JProfiler](http://www.ej-technologies.com/products/overview.html)

================================================ FILE: pom.xml ================================================ 4.0.0 org.apache.jdbm jdbm 3.0-SNAPSHOT Jan Kotek jan Apache 2 UTF-8 junit junit 4.8.2 jar test false org.apache.maven.plugins maven-compiler-plugin 2.3.2 1.5 1.5 ${project.build.sourceEncoding} org.apache.maven.plugins maven-resources-plugin 2.5 ${project.build.sourceEncoding} org.apache.maven.plugins maven-source-plugin 2.1.2 attach-sources package jar test-jar

================================================ FILE: src/main/java/org/apache/jdbm/BTree.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.*; import java.util.Arrays; import java.util.Comparator; import java.util.List; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; /** * B+Tree persistent indexing data structure. B+Trees are optimized for * block-based, random I/O storage because they store multiple keys on * one tree node (called BTreeNode). In addition, the leaf nodes * directly contain (inline) small values associated with the keys, allowing a * single (or sequential) disk read of all the values on the node. *

* B+Trees are n-ary, yielding log(N) search cost. They are self-balancing, * preventing search performance degradation when the size of the tree grows. *

* BTree stores its keys sorted. By default JDBM expects key to implement * Comparable interface but user may supply its own Comparator * at BTree creation time. Comparator is serialized and stored as part of BTree. *

* The B+Tree allows traversing the keys in forward and reverse order using a * TupleBrowser obtained from the browse() methods. But it is better to use * BTreeMap wrapper which implements SortedMap interface *

* This implementation does not directly support duplicate keys. It is * possible to handle duplicates by grouping values using an ArrayList as value. * This scenario is supported by JDBM serialization so there is no big performance penalty. *

* There is no limit on key size or value size, but it is recommended to keep * keys as small as possible to reduce disk I/O. If serialized value exceeds 32 bytes, * it is stored in separate record and tree contains only recid reference to it. * BTree uses delta compression for its keys. * * * @author Alex Boisvert * @author Jan Kotek */ class BTree { private static final boolean DEBUG = false; /** * Default node size (number of entries per node) */ public static final int DEFAULT_SIZE = 32; //TODO test optimal size, it has serious impact on sequencial write and read /** * Record manager used to persist changes in BTreeNodes */ protected transient DBAbstract _db; /** * This BTree's record ID in the DB. */ private transient long _recid; /** * Comparator used to index entries (optional) */ protected Comparator _comparator; /** * Serializer used to serialize index keys (optional) */ protected Serializer keySerializer; /** * Serializer used to serialize index values (optional) */ protected Serializer valueSerializer; /** * indicates if values should be loaded during deserialization, set to false during defragmentation */ boolean loadValues = true; /** if false map contains only keys, used for set*/ boolean hasValues = true; /** * The number of structural modifications to the tree for fail fast iterators. This value is just for runtime, it is not persisted */ transient int modCount = 0; /** * cached instance of an insert result, so we do not have to allocate new object on each insert */ protected BTreeNode.InsertResult insertResultReuse; //TODO investigate performance impact of removing this public Serializer getKeySerializer() { return keySerializer; } public Serializer getValueSerializer() { return valueSerializer; } /** * Height of the B+Tree. This is the number of BTreeNodes you have to traverse * to get to a leaf BTreeNode, starting from the root. */ private int _height; /** * Recid of the root BTreeNode */ private transient long _root; /** * Total number of entries in the BTree */ protected volatile long _entries; /** * Serializer used for BTreeNodes of this tree */ private transient BTreeNode _nodeSerializer = new BTreeNode(); { _nodeSerializer._btree = this; } /** * Listeners which are notified about changes in records */ protected RecordListener[] recordListeners = new RecordListener[0]; final protected ReadWriteLock lock = new ReentrantReadWriteLock(); /** * No-argument constructor used by serialization. */ public BTree() { // empty } /** * Create a new persistent BTree */ @SuppressWarnings("unchecked") public static BTree createInstance(DBAbstract db) throws IOException { return createInstance(db, null, null, null,true); } /** * Create a new persistent BTree */ public static BTree createInstance(DBAbstract db, Comparator comparator, Serializer keySerializer, Serializer valueSerializer, boolean hasValues) throws IOException { BTree btree; if (db == null) { throw new IllegalArgumentException("Argument 'db' is null"); } btree = new BTree(); btree._db = db; btree._comparator = comparator; btree.keySerializer = keySerializer; btree.valueSerializer = valueSerializer; btree.hasValues = hasValues; btree._recid = db.insert(btree, btree.getRecordManager().defaultSerializer(),false); return btree; } /** * Load a persistent BTree. 
* * @param db DB used to store the persistent btree * @param recid Record id of the BTree */ @SuppressWarnings("unchecked") public static BTree load(DBAbstract db, long recid) throws IOException { BTree btree = (BTree) db.fetch(recid); btree._recid = recid; btree._db = db; btree._nodeSerializer = new BTreeNode(); btree._nodeSerializer._btree = btree; return btree; } /** * Get the {@link ReadWriteLock} associated with this BTree. * This should be used with browsing operations to ensure * consistency. * * @return */ public ReadWriteLock getLock() { return lock; } /** * Insert an entry in the BTree. *

* The BTree cannot store duplicate entries. An existing entry can be * replaced using the replace flag. If an entry with the * same key already exists in the BTree, its value is returned. * * @param key Insert key * @param value Insert value * @param replace Set to true to replace an existing key-value pair. * @return Existing value, if any. */ public V insert(final K key, final V value, final boolean replace) throws IOException { if (key == null) { throw new IllegalArgumentException("Argument 'key' is null"); } if (value == null) { throw new IllegalArgumentException("Argument 'value' is null"); } try { lock.writeLock().lock(); BTreeNode rootNode = getRoot(); if (rootNode == null) { // BTree is currently empty, create a new root BTreeNode if (DEBUG) { System.out.println("BTree.insert() new root BTreeNode"); } rootNode = new BTreeNode(this, key, value); _root = rootNode._recid; _height = 1; _entries = 1; _db.update(_recid, this); modCount++; //notifi listeners for (RecordListener l : recordListeners) { l.recordInserted(key, value); } return null; } else { BTreeNode.InsertResult insert = rootNode.insert(_height, key, value, replace); boolean dirty = false; if (insert._overflow != null) { // current root node overflowed, we replace with a new root node if (DEBUG) { System.out.println("BTreeNode.insert() replace root BTreeNode due to overflow"); } rootNode = new BTreeNode(this, rootNode, insert._overflow); _root = rootNode._recid; _height += 1; dirty = true; } if (insert._existing == null) { _entries++; modCount++; dirty = true; } if (dirty) { _db.update(_recid, this); } //notify listeners for (RecordListener l : recordListeners) { if (insert._existing == null) l.recordInserted(key, value); else l.recordUpdated(key, insert._existing, value); } // insert might have returned an existing value V ret = insert._existing; //zero out tuple and put it for reuse insert._existing = null; insert._overflow = null; this.insertResultReuse = insert; return ret; } } finally { lock.writeLock().unlock(); } } /** * Remove an entry with the given key from the BTree. * * @param key Removal key * @return Value associated with the key, or null if no entry with given * key existed in the BTree. */ public V remove(K key) throws IOException { if (key == null) { throw new IllegalArgumentException("Argument 'key' is null"); } try { lock.writeLock().lock(); BTreeNode rootNode = getRoot(); if (rootNode == null) { return null; } boolean dirty = false; BTreeNode.RemoveResult remove = rootNode.remove(_height, key); if (remove._underflow && rootNode.isEmpty()) { _height -= 1; dirty = true; _db.delete(_root); if (_height == 0) { _root = 0; } else { _root = rootNode.loadLastChildNode()._recid; } } if (remove._value != null) { _entries--; modCount++; dirty = true; } if (dirty) { _db.update(_recid, this); } if (remove._value != null) for (RecordListener l : recordListeners) l.recordRemoved(key, remove._value); return remove._value; } finally { lock.writeLock().unlock(); } } /** * Find the value associated with the given key. * * @param key Lookup key. * @return Value associated with the key, or null if not found. 
*/ public V get(K key) throws IOException { if (key == null) { throw new IllegalArgumentException("Argument 'key' is null"); } try { lock.readLock().lock(); BTreeNode rootNode = getRoot(); if (rootNode == null) { return null; } return rootNode.findValue(_height, key); } finally { lock.readLock().unlock(); } } /** * Find the value associated with the given key, or the entry immediately * following this key in the ordered BTree. * * @param key Lookup key. * @return Value associated with the key, or a greater entry, or null if no * greater entry was found. */ public BTreeTuple findGreaterOrEqual(K key) throws IOException { BTreeTuple tuple; BTreeTupleBrowser browser; if (key == null) { // there can't be a key greater than or equal to "null" // because null is considered an infinite key. return null; } tuple = new BTreeTuple(null, null); browser = browse(key,true); if (browser.getNext(tuple)) { return tuple; } else { return null; } } /** * Get a browser initially positioned at the beginning of the BTree. *

* WARNING: If you make structural modifications to the BTree during * browsing, you will get inconsistent browsing results. * * * @return Browser positioned at the beginning of the BTree. */ @SuppressWarnings("unchecked") public BTreeTupleBrowser browse() throws IOException { try { lock.readLock().lock(); BTreeNode rootNode = getRoot(); if (rootNode == null) { return EMPTY_BROWSER; } return rootNode.findFirst(); } finally { lock.readLock().unlock(); } } /** * Get a browser initially positioned just before the given key. *

* WARNING: If you make structural modifications to the BTree during * browsing, you will get inconsistent browsing results. * * * @param key Key used to position the browser. If null, the browser * will be positioned after the last entry of the BTree. * (Null is considered to be an "infinite" key) * @return Browser positioned just before the given key. */ @SuppressWarnings("unchecked") public BTreeTupleBrowser browse(final K key, final boolean inclusive) throws IOException { try { lock.readLock().lock(); BTreeNode rootNode = getRoot(); if (rootNode == null) { return EMPTY_BROWSER; } BTreeTupleBrowser browser = rootNode.find(_height, key, inclusive); return browser; } finally { lock.readLock().unlock(); } } /** * Return the persistent record identifier of the BTree. */ public long getRecid() { return _recid; } /** * Return the root BTreeNode, or null if it doesn't exist. */ BTreeNode getRoot() throws IOException { if (_root == 0) { return null; } BTreeNode root = _db.fetch(_root, _nodeSerializer); if (root != null) { root._recid = _root; root._btree = this; } return root; } static BTree readExternal(DataInput in, Serialization ser) throws IOException, ClassNotFoundException { BTree tree = new BTree(); tree._db = ser.db; tree._height = in.readInt(); tree._recid = in.readLong(); tree._root = in.readLong(); tree._entries = in.readLong(); tree.hasValues = in.readBoolean(); tree._comparator = (Comparator) ser.deserialize(in); tree.keySerializer = (Serializer) ser.deserialize(in); tree.valueSerializer = (Serializer) ser.deserialize(in); return tree; } public void writeExternal(DataOutput out) throws IOException { out.writeInt(_height); out.writeLong(_recid); out.writeLong(_root); out.writeLong(_entries); out.writeBoolean(hasValues); _db.defaultSerializer().serialize(out, _comparator); _db.defaultSerializer().serialize(out, keySerializer); _db.defaultSerializer().serialize(out, valueSerializer); } /** * Copies tree from one db to the other, defragmenting it along the way * @param recid * @param r1 * @param r2 * @throws IOException */ public static void defrag(long recid, DBStore r1, DBStore r2) throws IOException { try { byte[] data = r1.fetchRaw(recid); r2.forceInsert(recid, data); DataInput in = new DataInputOutput(data); BTree t = (BTree) r1.defaultSerializer().deserialize(in); t.loadValues = false; t._db = r1; t._nodeSerializer = new BTreeNode(t, false); BTreeNode p = t.getRoot(); if (p != null) { r2.forceInsert(t._root, r1.fetchRaw(t._root)); p.defrag(r1, r2); } } catch (ClassNotFoundException e) { throw new IOError(e); } } /** * Browser returning no element.
*/ private static final BTreeTupleBrowser EMPTY_BROWSER = new BTreeTupleBrowser() { public boolean getNext(BTreeTuple tuple) { return false; } public boolean getPrevious(BTreeTuple tuple) { return false; } public void remove(Object key) { throw new IndexOutOfBoundsException(); } }; /** * add RecordListener which is notified about record changes * * @param listener */ public void addRecordListener(RecordListener listener) { recordListeners = Arrays.copyOf(recordListeners, recordListeners.length + 1); recordListeners[recordListeners.length - 1] = listener; } /** * remove RecordListener which is notified about record changes * * @param listener */ public void removeRecordListener(RecordListener listener) { List l = Arrays.asList(recordListeners); l.remove(listener); recordListeners = (RecordListener[]) l.toArray(new RecordListener[1]); } public DBAbstract getRecordManager() { return _db; } public Comparator getComparator() { return _comparator; } /** * Deletes all BTreeNodes in this BTree */ public void clear() throws IOException { try { lock.writeLock().lock(); BTreeNode rootNode = getRoot(); if (rootNode != null) rootNode.delete(); _entries = 0; modCount++; } finally { lock.writeLock().unlock(); } } /** * Used for debugging and testing only. Populates the 'out' list with * the recids of all child nodes in the BTree. * * @param out * @throws IOException */ void dumpChildNodeRecIDs(List out) throws IOException { BTreeNode root = getRoot(); if (root != null) { out.add(root._recid); root.dumpChildNodeRecIDs(out, _height); } } public boolean hasValues() { return hasValues; } /** * Browser to traverse a collection of tuples. The browser allows for * forward and reverse order traversal. * * */ static interface BTreeTupleBrowser { /** * Get the next tuple. * * @param tuple Tuple into which values are copied. * @return True if values have been copied in tuple, or false if there is no next tuple. */ boolean getNext(BTree.BTreeTuple tuple) throws IOException; /** * Get the previous tuple. * * @param tuple Tuple into which values are copied. * @return True if values have been copied in tuple, or false if there is no previous tuple. */ boolean getPrevious(BTree.BTreeTuple tuple) throws IOException; /** * Remove an entry with given key, and increases browsers expectedModCount * This method is here to support 'ConcurrentModificationException' on Map interface. * * @param key */ void remove(K key) throws IOException; } /** * Tuple consisting of a key-value pair. */ static final class BTreeTuple { K key; V value; BTreeTuple() { // empty } BTreeTuple(K key, V value) { this.key = key; this.value = value; } } } ================================================ FILE: src/main/java/org/apache/jdbm/BTreeLazyRecord.java ================================================ package org.apache.jdbm; import java.io.*; /** * An record lazily loaded from store. 
* This is used in BTree/HTree to store big records outside of index tree * * @author Jan Kotek */ class BTreeLazyRecord { private E value = null; private DBAbstract db; private Serializer serializer; final long recid; BTreeLazyRecord(DBAbstract db, long recid, Serializer serializer) { this.db = db; this.recid = recid; this.serializer = serializer; } E get() { if (value != null) return value; try { value = db.fetch(recid, serializer); } catch (IOException e) { throw new IOError(e); } return value; } void delete() { try { db.delete(recid); } catch (IOException e) { throw new IOError(e); } value = null; serializer = null; db = null; } /** * Serialier used to insert already serialized data into store */ static final Serializer FAKE_SERIALIZER = new Serializer() { public void serialize(DataOutput out, Object obj) throws IOException { byte[] data = (byte[]) obj; out.write(data); } public Object deserialize(DataInput in) throws IOException, ClassNotFoundException { throw new UnsupportedOperationException(); } }; static Object fastDeser(DataInputOutput in, Serializer serializer, int expectedSize) throws IOException, ClassNotFoundException { //we should propably copy data for deserialization into separate buffer and pass it to Serializer //but to make it faster, Serializer will operate directly on top of buffer. //and we check that it readed correct number of bytes. int origAvail = in.available(); if (origAvail == 0) throw new InternalError(); //is backed up by byte[] buffer, so there should be always avail bytes Object ret = serializer.deserialize(in); //check than valueSerializer did not read more bytes, if yes it readed bytes from next record int readed = origAvail - in.available(); if (readed > expectedSize) throw new IOException("Serializer readed more bytes than is record size."); else if (readed != expectedSize) { //deserializer did not readed all bytes, unussual but valid. //Skip some to get into correct position for (int ii = 0; ii < expectedSize - readed; ii++) in.readUnsignedByte(); } return ret; } /** * if value in tree is serialized in more bytes, it is stored as separate record outside of tree * This value must be always smaller than 250 */ static final int MAX_INTREE_RECORD_SIZE = 32; static { if (MAX_INTREE_RECORD_SIZE > 250) throw new Error(); } static final int NULL = 255; static final int LAZY_RECORD = 254; } ================================================ FILE: src/main/java/org/apache/jdbm/BTreeMap.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package org.apache.jdbm; import java.io.IOError; import java.io.IOException; import java.util.*; import java.util.concurrent.ConcurrentNavigableMap; /** * Wrapper for BTree which implements ConcurrentNavigableMap interface * * @param key type * @param value type * * @author Jan Kotek */ class BTreeMap extends AbstractMap implements ConcurrentNavigableMap { protected BTree tree; protected final K fromKey; protected final K toKey; protected final boolean readonly; protected NavigableSet keySet2; private final boolean toInclusive; private final boolean fromInclusive; public BTreeMap(BTree tree, boolean readonly) { this(tree, readonly, null, false, null, false); } protected BTreeMap(BTree tree, boolean readonly, K fromKey, boolean fromInclusive, K toKey, boolean toInclusive) { this.tree = tree; this.fromKey = fromKey; this.fromInclusive = fromInclusive; this.toKey = toKey; this.toInclusive = toInclusive; this.readonly = readonly; } @Override public Set> entrySet() { return _entrySet; } private final Set> _entrySet = new AbstractSet>() { protected Entry newEntry(K k, V v) { return new SimpleEntry(k, v) { private static final long serialVersionUID = 978651696969194154L; public V setValue(V arg0) { BTreeMap.this.put(getKey(), arg0); return super.setValue(arg0); } }; } public boolean add(java.util.Map.Entry e) { if (readonly) throw new UnsupportedOperationException("readonly"); try { if (e.getKey() == null) throw new NullPointerException("Can not add null key"); if (!inBounds(e.getKey())) throw new IllegalArgumentException("key outside of bounds"); return tree.insert(e.getKey(), e.getValue(), true) == null; } catch (IOException e1) { throw new IOError(e1); } } @SuppressWarnings("unchecked") public boolean contains(Object o) { if (o instanceof Entry) { Entry e = (java.util.Map.Entry) o; try { if (!inBounds(e.getKey())) return false; if (e.getKey() != null && tree.get(e.getKey()) != null) return true; } catch (IOException e1) { throw new IOError(e1); } } return false; } public Iterator> iterator() { try { final BTree.BTreeTupleBrowser br = fromKey == null ? 
tree.browse() : tree.browse(fromKey, fromInclusive); return new Iterator>() { private Entry next; private K lastKey; void ensureNext() { try { BTree.BTreeTuple t = new BTree.BTreeTuple(); if (br.getNext(t) && inBounds(t.key)) next = newEntry(t.key, t.value); else next = null; } catch (IOException e1) { throw new IOError(e1); } } { ensureNext(); } public boolean hasNext() { return next != null; } public java.util.Map.Entry next() { if (next == null) throw new NoSuchElementException(); Entry ret = next; lastKey = ret.getKey(); //move to next position ensureNext(); return ret; } public void remove() { if (readonly) throw new UnsupportedOperationException("readonly"); if (lastKey == null) throw new IllegalStateException(); try { br.remove(lastKey); lastKey = null; } catch (IOException e1) { throw new IOError(e1); } } }; } catch (IOException e) { throw new IOError(e); } } @SuppressWarnings("unchecked") public boolean remove(Object o) { if (readonly) throw new UnsupportedOperationException("readonly"); if (o instanceof Entry) { Entry e = (java.util.Map.Entry) o; try { //check for nulls if (e.getKey() == null || e.getValue() == null) return false; if (!inBounds(e.getKey())) throw new IllegalArgumentException("out of bounds"); //get old value, must be same as item in entry V v = get(e.getKey()); if (v == null || !e.getValue().equals(v)) return false; V v2 = tree.remove(e.getKey()); return v2 != null; } catch (IOException e1) { throw new IOError(e1); } } return false; } public int size() { return BTreeMap.this.size(); } public void clear(){ if(fromKey!=null || toKey!=null) super.clear(); else try { tree.clear(); } catch (IOException e) { throw new IOError(e); } } }; public boolean inBounds(K e) { if(fromKey == null && toKey == null) return true; Comparator comp = comparator(); if (comp == null) comp = Utils.COMPARABLE_COMPARATOR; if(fromKey!=null){ final int compare = comp.compare(e, fromKey); if(compare<0) return false; if(!fromInclusive && compare == 0) return false; } if(toKey!=null){ final int compare = comp.compare(e, toKey); if(compare>0)return false; if(!toInclusive && compare == 0) return false; } return true; } @SuppressWarnings("unchecked") @Override public V get(Object key) { try { if (key == null) return null; if (!inBounds((K) key)) return null; return tree.get((K) key); } catch (ClassCastException e) { return null; } catch (IOException e) { throw new IOError(e); } } @SuppressWarnings("unchecked") @Override public V remove(Object key) { if (readonly) throw new UnsupportedOperationException("readonly"); try { if (key == null || tree.get((K) key) == null) return null; if (!inBounds((K) key)) throw new IllegalArgumentException("out of bounds"); return tree.remove((K) key); } catch (ClassCastException e) { return null; } catch (IOException e) { throw new IOError(e); } } public V put(K key, V value) { if (readonly) throw new UnsupportedOperationException("readonly"); try { if (key == null || value == null) throw new NullPointerException("Null key or value"); if (!inBounds(key)) throw new IllegalArgumentException("out of bounds"); return tree.insert(key, value, true); } catch (IOException e) { throw new IOError(e); } } public void clear(){ entrySet().clear(); } @SuppressWarnings("unchecked") @Override public boolean containsKey(Object key) { if (key == null) return false; try { if (!inBounds((K) key)) return false; V v = tree.get((K) key); return v != null; } catch (IOException e) { throw new IOError(e); } catch (ClassCastException e) { return false; } } public Comparator comparator() { 
return tree._comparator; } public K firstKey() { if (isEmpty()) return null; try { BTree.BTreeTupleBrowser b = fromKey == null ? tree.browse() : tree.browse(fromKey,fromInclusive); BTree.BTreeTuple t = new BTree.BTreeTuple(); b.getNext(t); return t.key; } catch (IOException e) { throw new IOError(e); } } public K lastKey() { if (isEmpty()) return null; try { BTree.BTreeTupleBrowser b = toKey == null ? tree.browse(null,true) : tree.browse(toKey,false); BTree.BTreeTuple t = new BTree.BTreeTuple(); b.getPrevious(t); if(!toInclusive && toKey!=null){ //make sure we wont return last key Comparator c = comparator(); if(c==null) c=Utils.COMPARABLE_COMPARATOR; if(c.compare(t.key,toKey)==0) b.getPrevious(t); } return t.key; } catch (IOException e) { throw new IOError(e); } } public ConcurrentNavigableMap headMap(K toKey2, boolean inclusive) { K toKey3 = Utils.min(this.toKey,toKey2,comparator()); boolean inclusive2 = toKey3 == toKey? toInclusive : inclusive; return new BTreeMap(tree, readonly, this.fromKey, this.fromInclusive, toKey3, inclusive2); } public ConcurrentNavigableMap headMap(K toKey) { return headMap(toKey,false); } public Entry lowerEntry(K key) { K k = lowerKey(key); return k==null? null : new SimpleEntry(k,get(k)); } public K lowerKey(K key) { if (isEmpty()) return null; K key2 = Utils.min(key,toKey,comparator()); try { BTree.BTreeTupleBrowser b = tree.browse(key2,true) ; BTree.BTreeTuple t = new BTree.BTreeTuple(); b.getPrevious(t); return t.key; } catch (IOException e) { throw new IOError(e); } } public Entry floorEntry(K key) { K k = floorKey(key); return k==null? null : new SimpleEntry(k,get(k)); } public K floorKey(K key) { if (isEmpty()) return null; K key2 = Utils.max(key,fromKey,comparator()); try { BTree.BTreeTupleBrowser b = tree.browse(key2,true) ; BTree.BTreeTuple t = new BTree.BTreeTuple(); b.getNext(t); Comparator comp = comparator(); if (comp == null) comp = Utils.COMPARABLE_COMPARATOR; if(comp.compare(t.key,key2) == 0) return t.key; b.getPrevious(t); b.getPrevious(t); return t.key; } catch (IOException e) { throw new IOError(e); } } public Entry ceilingEntry(K key) { K k = ceilingKey(key); return k==null? null : new SimpleEntry(k,get(k)); } public K ceilingKey(K key) { if (isEmpty()) return null; K key2 = Utils.min(key,toKey,comparator()); try { BTree.BTreeTupleBrowser b = tree.browse(key2,true) ; BTree.BTreeTuple t = new BTree.BTreeTuple(); b.getNext(t); return t.key; } catch (IOException e) { throw new IOError(e); } } public Entry higherEntry(K key) { K k = higherKey(key); return k==null? null : new SimpleEntry(k,get(k)); } public K higherKey(K key) { if (isEmpty()) return null; K key2 = Utils.max(key,fromKey,comparator()); try { BTree.BTreeTupleBrowser b = tree.browse(key2,false) ; BTree.BTreeTuple t = new BTree.BTreeTuple(); b.getNext(t); return t.key; } catch (IOException e) { throw new IOError(e); } } public Entry firstEntry() { K k = firstKey(); return k==null? null : new SimpleEntry(k,get(k)); } public Entry lastEntry() { K k = lastKey(); return k==null? 
null : new SimpleEntry(k,get(k)); } public Entry pollFirstEntry() { Entry first = firstEntry(); if(first!=null) remove(first.getKey()); return first; } public Entry pollLastEntry() { Entry last = lastEntry(); if(last!=null) remove(last.getKey()); return last; } public ConcurrentNavigableMap descendingMap() { throw new UnsupportedOperationException("not implemented yet"); //TODO implement descending (reverse order) map } public NavigableSet keySet() { return navigableKeySet(); } public NavigableSet navigableKeySet() { if(keySet2 == null) keySet2 = new BTreeSet((BTreeMap) this); return keySet2; } public NavigableSet descendingKeySet() { return descendingMap().navigableKeySet(); } public ConcurrentNavigableMap tailMap(K fromKey) { return tailMap(fromKey,true); } public ConcurrentNavigableMap tailMap(K fromKey2, boolean inclusive) { K fromKey3 = Utils.max(this.fromKey,fromKey2,comparator()); boolean inclusive2 = fromKey3 == toKey? toInclusive : inclusive; return new BTreeMap(tree, readonly, fromKey3, inclusive2, toKey, toInclusive); } public ConcurrentNavigableMap subMap(K fromKey, boolean fromInclusive, K toKey, boolean toInclusive) { Comparator comp = comparator(); if (comp == null) comp = Utils.COMPARABLE_COMPARATOR; if (comp.compare(fromKey, toKey) > 0) throw new IllegalArgumentException("fromKey is bigger then toKey"); return new BTreeMap(tree, readonly, fromKey, fromInclusive, toKey, toInclusive); } public ConcurrentNavigableMap subMap(K fromKey, K toKey) { return subMap(fromKey,true,toKey,false); } public BTree getTree() { return tree; } public void addRecordListener(RecordListener listener) { tree.addRecordListener(listener); } public DBAbstract getRecordManager() { return tree.getRecordManager(); } public void removeRecordListener(RecordListener listener) { tree.removeRecordListener(listener); } public int size() { if (fromKey == null && toKey == null) return (int) tree._entries; //use fast counter on tree if Map has no bounds else { //had to count items in iterator Iterator iter = keySet().iterator(); int counter = 0; while (iter.hasNext()) { iter.next(); counter++; } return counter; } } public V putIfAbsent(K key, V value) { tree.lock.writeLock().lock(); try{ if (!containsKey(key)) return put(key, value); else return get(key); }finally { tree.lock.writeLock().unlock(); } } public boolean remove(Object key, Object value) { tree.lock.writeLock().lock(); try{ if (containsKey(key) && get(key).equals(value)) { remove(key); return true; } else return false; }finally { tree.lock.writeLock().unlock(); } } public boolean replace(K key, V oldValue, V newValue) { tree.lock.writeLock().lock(); try{ if (containsKey(key) && get(key).equals(oldValue)) { put(key, newValue); return true; } else return false; }finally { tree.lock.writeLock().unlock(); } } public V replace(K key, V value) { tree.lock.writeLock().lock(); try{ if (containsKey(key)) { return put(key, value); } else return null; }finally { tree.lock.writeLock().unlock(); } } } ================================================ FILE: src/main/java/org/apache/jdbm/BTreeNode.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.*; import java.util.ConcurrentModificationException; import java.util.List; /** * Node of a BTree. *

 * The node contains a number of key-value pairs. Keys are ordered to allow * dichotomic search. If a value is too big, it is stored in a separate record * and only a recid reference is kept in the node. *

* If the node is a leaf node, the keys and values are user-defined and * represent entries inserted by the user. *

* If the node is non-leaf, each key represents the greatest key in the * underlying BTreeNode and the values are recids pointing to the children BTreeNodes. * The only exception is the rightmost BTreeNode, which is considered to have an * "infinite" key value, meaning that any insert will be to the left of this * pseudo-key * * @author Alex Boisvert * @author Jan Kotek */ final class BTreeNode implements Serializer> { private static final boolean DEBUG = false; /** * Parent B+Tree. */ transient BTree _btree; /** * This BTreeNode's record ID in the DB. */ protected transient long _recid; /** * Flag indicating if this is a leaf BTreeNode. */ protected boolean _isLeaf; /** * Keys of children nodes */ protected K[] _keys; /** * Values associated with keys. (Only valid if leaf node) */ protected Object[] _values; /** * Children nodes (recids) associated with keys. (Only valid if non-leaf node) */ protected long[] _children; /** * Index of first used item at the node */ protected byte _first; /** * Previous leaf node (only if this node is a leaf) */ protected long _previous; /** * Next leaf node (only if this node is a leaf) */ protected long _next; /** * Return the B+Tree that is the owner of this {@link BTreeNode}. */ public BTree getBTree() { return _btree; } /** * No-argument constructor used by serialization. */ public BTreeNode() { // empty } /** * Root node overflow constructor */ @SuppressWarnings("unchecked") BTreeNode(BTree btree, BTreeNode root, BTreeNode overflow) throws IOException { _btree = btree; _isLeaf = false; _first = BTree.DEFAULT_SIZE - 2; _keys = (K[]) new Object[BTree.DEFAULT_SIZE]; _keys[BTree.DEFAULT_SIZE - 2] = overflow.getLargestKey(); _keys[BTree.DEFAULT_SIZE - 1] = root.getLargestKey(); _children = new long[BTree.DEFAULT_SIZE]; _children[BTree.DEFAULT_SIZE - 2] = overflow._recid; _children[BTree.DEFAULT_SIZE - 1] = root._recid; _recid = _btree._db.insert(this, this,false); } /** * Root node (first insert) constructor. */ @SuppressWarnings("unchecked") BTreeNode(BTree btree, K key, V value) throws IOException { _btree = btree; _isLeaf = true; _first = BTree.DEFAULT_SIZE - 2; _keys = (K[]) new Object[BTree.DEFAULT_SIZE]; _keys[BTree.DEFAULT_SIZE - 2] = key; _keys[BTree.DEFAULT_SIZE - 1] = null; // I am the root BTreeNode for now _values = new Object[BTree.DEFAULT_SIZE]; _values[BTree.DEFAULT_SIZE - 2] = value; _values[BTree.DEFAULT_SIZE - 1] = null; // I am the root BTreeNode for now _recid = _btree._db.insert(this, this,false); } /** * Overflow node constructor. Creates an empty BTreeNode. */ @SuppressWarnings("unchecked") BTreeNode(BTree btree, boolean isLeaf){ _btree = btree; _isLeaf = isLeaf; // node will initially be half-full _first = BTree.DEFAULT_SIZE / 2; _keys = (K[]) new Object[BTree.DEFAULT_SIZE]; if (isLeaf) { _values = new Object[BTree.DEFAULT_SIZE]; } else { _children = new long[BTree.DEFAULT_SIZE]; } try{ _recid = _btree._db.insert(this, this,false); }catch(IOException e ){ throw new IOError(e); } } /** * Get largest key under this BTreeNode. Null is considered to be the * greatest possible key. */ K getLargestKey() { return _keys[BTree.DEFAULT_SIZE - 1]; } /** * Return true if BTreeNode is empty. */ boolean isEmpty() { if (_isLeaf) { return (_first == _values.length - 1); } else { return (_first == _children.length - 1); } } /** * Return true if BTreeNode is full. */ boolean isFull() { return (_first == 0); } /** * Find the object associated with the given key. 
* * @param height Height of the current BTreeNode (zero is leaf node) * @param key The key * @return TupleBrowser positionned just before the given key, or before * next greater key if key isn't found. */ BTree.BTreeTupleBrowser find(int height, final K key, final boolean inclusive) throws IOException { byte index = findChildren(key,inclusive); height -= 1; if (height == 0) { // leaf node return new Browser(this, index); } else { // non-leaf node BTreeNode child = loadNode(_children[index]); return child.find(height, key,inclusive); } } /** * Find value associated with the given key. * * @param height Height of the current BTreeNode (zero is leaf node) * @param key The key * @return TupleBrowser positionned just before the given key, or before * next greater key if key isn't found. */ V findValue(int height, K key) throws IOException { byte index = findChildren(key,true); height -= 1; if (height == 0) { K key2 = _keys[index]; // // get returns the matching key or the next ordered key, so we must // // check if we have an exact match if (key2 == null || compare(key, key2) != 0) return null; // leaf node if (_values[index] instanceof BTreeLazyRecord) return ((BTreeLazyRecord) _values[index]).get(); else return (V) _values[index]; } else { // non-leaf node BTreeNode child = loadNode(_children[index]); return child.findValue(height, key); } } /** * Find first entry and return a browser positioned before it. * * @return TupleBrowser positionned just before the first entry. */ BTree.BTreeTupleBrowser findFirst() throws IOException { if (_isLeaf) { return new Browser(this, _first); } else { BTreeNode child = loadNode(_children[_first]); return child.findFirst(); } } /** * Deletes this BTreeNode and all children nodes from the record manager */ void delete() throws IOException { if (_isLeaf) { if (_next != 0) { BTreeNode nextNode = loadNode(_next); if (nextNode._previous == _recid) { // this consistency check can be removed in production code nextNode._previous = _previous; _btree._db.update(nextNode._recid, nextNode, nextNode); } else { throw new Error("Inconsistent data in BTree"); } } if (_previous != 0) { BTreeNode previousNode = loadNode(_previous); if (previousNode._next != _recid) { // this consistency check can be removed in production code previousNode._next = _next; _btree._db.update(previousNode._recid, previousNode, previousNode); } else { throw new Error("Inconsistent data in BTree"); } } } else { int left = _first; int right = BTree.DEFAULT_SIZE - 1; for (int i = left; i <= right; i++) { BTreeNode childNode = loadNode(_children[i]); childNode.delete(); } } _btree._db.delete(_recid); } /** * Insert the given key and value. *

* Since the Btree does not support duplicate entries, the caller must * specify whether to replace the existing value. * * @param height Height of the current BTreeNode (zero is leaf node) * @param key Insert key * @param value Insert value * @param replace Set to true to replace the existing value, if one exists. * @return Insertion result containing existing value OR a BTreeNode if the key * was inserted and provoked a BTreeNode overflow. */ InsertResult insert(int height, K key, final V value, final boolean replace) throws IOException { InsertResult result; long overflow; final byte index = findChildren(key,true); height -= 1; if (height == 0) { //reuse InsertResult instance to avoid GC trashing on massive inserts result = _btree.insertResultReuse; _btree.insertResultReuse = null; if (result == null) result = new InsertResult(); // inserting on a leaf BTreeNode overflow = -1; if (DEBUG) { System.out.println("BTreeNode.insert() Insert on leaf node key=" + key + " value=" + value + " index=" + index); } if (compare(_keys[index], key) == 0) { // key already exists if (DEBUG) { System.out.println("BTreeNode.insert() Key already exists."); } boolean isLazyRecord = _values[index] instanceof BTreeLazyRecord; if (isLazyRecord) result._existing = ((BTreeLazyRecord) _values[index]).get(); else result._existing = (V) _values[index]; if (replace) { //remove old lazy record if necesarry if (isLazyRecord) ((BTreeLazyRecord) _values[index]).delete(); _values[index] = value; _btree._db.update(_recid, this, this); } // return the existing key return result; } } else { // non-leaf BTreeNode BTreeNode child = loadNode(_children[index]); result = child.insert(height, key, value, replace); if (result._existing != null) { // return existing key, if any. return result; } if (result._overflow == null) { // no overflow means we're done with insertion return result; } // there was an overflow, we need to insert the overflow node on this BTreeNode if (DEBUG) { System.out.println("BTreeNode.insert() Overflow node: " + result._overflow._recid); } key = result._overflow.getLargestKey(); overflow = result._overflow._recid; // update child's largest key _keys[index] = child.getLargestKey(); // clean result so we can reuse it result._overflow = null; } // if we get here, we need to insert a new entry on the BTreeNode before _children[ index ] if (!isFull()) { if (height == 0) { insertEntry(this, index - 1, key, value); } else { insertChild(this, index - 1, key, overflow); } _btree._db.update(_recid, this, this); return result; } // node is full, we must divide the node final byte half = BTree.DEFAULT_SIZE >> 1; BTreeNode newNode = new BTreeNode(_btree, _isLeaf); if (index < half) { // move lower-half of entries to overflow node, including new entry if (DEBUG) { System.out.println("BTreeNode.insert() move lower-half of entries to overflow BTreeNode, including new entry."); } if (height == 0) { copyEntries(this, 0, newNode, half, index); setEntry(newNode, half + index, key, value); copyEntries(this, index, newNode, half + index + 1, half - index - 1); } else { copyChildren(this, 0, newNode, half, index); setChild(newNode, half + index, key, overflow); copyChildren(this, index, newNode, half + index + 1, half - index - 1); } } else { // move lower-half of entries to overflow node, new entry stays on this node if (DEBUG) { System.out.println("BTreeNode.insert() move lower-half of entries to overflow BTreeNode. 
New entry stays"); } if (height == 0) { copyEntries(this, 0, newNode, half, half); copyEntries(this, half, this, half - 1, index - half); setEntry(this, index - 1, key, value); } else { copyChildren(this, 0, newNode, half, half); copyChildren(this, half, this, half - 1, index - half); setChild(this, index - 1, key, overflow); } } _first = half - 1; // nullify lower half of entries for (int i = 0; i < _first; i++) { if (height == 0) { setEntry(this, i, null, null); } else { setChild(this, i, null, -1); } } if (_isLeaf) { // link newly created node newNode._previous = _previous; newNode._next = _recid; if (_previous != 0) { BTreeNode previous = loadNode(_previous); previous._next = newNode._recid; _btree._db.update(_previous, previous, this); } _previous = newNode._recid; } _btree._db.update(_recid, this, this); _btree._db.update(newNode._recid, newNode, this); result._overflow = newNode; return result; } /** * Remove the entry associated with the given key. * * @param height Height of the current BTreeNode (zero is leaf node) * @param key Removal key * @return Remove result object */ RemoveResult remove(int height, K key) throws IOException { RemoveResult result; int half = BTree.DEFAULT_SIZE / 2; byte index = findChildren(key,true); height -= 1; if (height == 0) { // remove leaf entry if (compare(_keys[index], key) != 0) { throw new IllegalArgumentException("Key not found: " + key); } result = new RemoveResult(); if (_values[index] instanceof BTreeLazyRecord) { BTreeLazyRecord r = (BTreeLazyRecord) _values[index]; result._value = r.get(); r.delete(); } else { result._value = (V) _values[index]; } removeEntry(this, index); // update this node _btree._db.update(_recid, this, this); } else { // recurse into Btree to remove entry on a children node BTreeNode child = loadNode(_children[index]); result = child.remove(height, key); // update children _keys[index] = child.getLargestKey(); _btree._db.update(_recid, this, this); if (result._underflow) { // underflow occured if (child._first != half + 1) { throw new IllegalStateException("Error during underflow [1]"); } if (index < _children.length - 1) { // exists greater brother node BTreeNode brother = loadNode(_children[index + 1]); int bfirst = brother._first; if (bfirst < half) { // steal entries from "brother" node int steal = (half - bfirst + 1) / 2; brother._first += steal; child._first -= steal; if (child._isLeaf) { copyEntries(child, half + 1, child, half + 1 - steal, half - 1); copyEntries(brother, bfirst, child, 2 * half - steal, steal); } else { copyChildren(child, half + 1, child, half + 1 - steal, half - 1); copyChildren(brother, bfirst, child, 2 * half - steal, steal); } for (int i = bfirst; i < bfirst + steal; i++) { if (brother._isLeaf) { setEntry(brother, i, null, null); } else { setChild(brother, i, null, -1); } } // update child's largest key _keys[index] = child.getLargestKey(); // no change in previous/next node // update nodes _btree._db.update(_recid, this, this); _btree._db.update(brother._recid, brother, this); _btree._db.update(child._recid, child, this); } else { // move all entries from node "child" to "brother" if (brother._first != half) { throw new IllegalStateException("Error during underflow [2]"); } brother._first = 1; if (child._isLeaf) { copyEntries(child, half + 1, brother, 1, half - 1); } else { copyChildren(child, half + 1, brother, 1, half - 1); } _btree._db.update(brother._recid, brother, this); // remove "child" from current node if (_isLeaf) { copyEntries(this, _first, this, _first + 1, index - _first); 
setEntry(this, _first, null, null); } else { copyChildren(this, _first, this, _first + 1, index - _first); setChild(this, _first, null, -1); } _first += 1; _btree._db.update(_recid, this, this); // re-link previous and next nodes if (child._previous != 0) { BTreeNode prev = loadNode(child._previous); prev._next = child._next; _btree._db.update(prev._recid, prev, this); } if (child._next != 0) { BTreeNode next = loadNode(child._next); next._previous = child._previous; _btree._db.update(next._recid, next, this); } // delete "child" node _btree._db.delete(child._recid); } } else { // node "brother" is before "child" BTreeNode brother = loadNode(_children[index - 1]); int bfirst = brother._first; if (bfirst < half) { // steal entries from "brother" node int steal = (half - bfirst + 1) / 2; brother._first += steal; child._first -= steal; if (child._isLeaf) { copyEntries(brother, 2 * half - steal, child, half + 1 - steal, steal); copyEntries(brother, bfirst, brother, bfirst + steal, 2 * half - bfirst - steal); } else { copyChildren(brother, 2 * half - steal, child, half + 1 - steal, steal); copyChildren(brother, bfirst, brother, bfirst + steal, 2 * half - bfirst - steal); } for (int i = bfirst; i < bfirst + steal; i++) { if (brother._isLeaf) { setEntry(brother, i, null, null); } else { setChild(brother, i, null, -1); } } // update brother's largest key _keys[index - 1] = brother.getLargestKey(); // no change in previous/next node // update nodes _btree._db.update(_recid, this, this); _btree._db.update(brother._recid, brother, this); _btree._db.update(child._recid, child, this); } else { // move all entries from node "brother" to "child" if (brother._first != half) { throw new IllegalStateException("Error during underflow [3]"); } child._first = 1; if (child._isLeaf) { copyEntries(brother, half, child, 1, half); } else { copyChildren(brother, half, child, 1, half); } _btree._db.update(child._recid, child, this); // remove "brother" from current node if (_isLeaf) { copyEntries(this, _first, this, _first + 1, index - 1 - _first); setEntry(this, _first, null, null); } else { copyChildren(this, _first, this, _first + 1, index - 1 - _first); setChild(this, _first, null, -1); } _first += 1; _btree._db.update(_recid, this, this); // re-link previous and next nodes if (brother._previous != 0) { BTreeNode prev = loadNode(brother._previous); prev._next = brother._next; _btree._db.update(prev._recid, prev, this); } if (brother._next != 0) { BTreeNode next = loadNode(brother._next); next._previous = brother._previous; _btree._db.update(next._recid, next, this); } // delete "brother" node _btree._db.delete(brother._recid); } } } } // underflow if node is more than half-empty result._underflow = _first > half; return result; } /** * Find the first children node with a key equal or greater than the given * key. * * @return index of first children with equal or greater key. */ private byte findChildren(final K key, final boolean inclusive) { int left = _first; int right = BTree.DEFAULT_SIZE - 1; int middle; final int D = inclusive?0:1; // binary search while (true) { middle = (left + right) / 2; if (compare(_keys[middle], key) < D) { left = middle + 1; } else { right = middle; } if (left >= right) { return (byte) right; } } } /** * Insert entry at given position. 
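 * A worked illustration of the shift performed below (a sketch derived from the code,
 * not an authoritative comment): with node._first = 3 and index = 5, the entries in
 * slots 3..5 are copied one position to the left (count = index - _first + 1 = 3),
 * _first becomes 2, and the new key/value pair is then stored at slot 5, keeping the
 * keys ordered.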
*/ private static void insertEntry(BTreeNode node, int index, K key, V value) { K[] keys = node._keys; Object[] values = node._values; int start = node._first; int count = index - node._first + 1; // shift entries to the left System.arraycopy(keys, start, keys, start - 1, count); System.arraycopy(values, start, values, start - 1, count); node._first -= 1; keys[index] = key; values[index] = value; } /** * Insert child at given position. */ private static void insertChild(BTreeNode node, int index, K key, long child) { K[] keys = node._keys; long[] children = node._children; int start = node._first; int count = index - node._first + 1; // shift entries to the left System.arraycopy(keys, start, keys, start - 1, count); System.arraycopy(children, start, children, start - 1, count); node._first -= 1; keys[index] = key; children[index] = child; } /** * Remove entry at given position. */ private static void removeEntry(BTreeNode node, int index) { K[] keys = node._keys; Object[] values = node._values; int start = node._first; int count = index - node._first; System.arraycopy(keys, start, keys, start + 1, count); keys[start] = null; System.arraycopy(values, start, values, start + 1, count); values[start] = null; node._first++; } /** * Set the entry at the given index. */ private static void setEntry(BTreeNode node, int index, K key, V value) { node._keys[index] = key; node._values[index] = value; } /** * Set the child BTreeNode recid at the given index. */ private static void setChild(BTreeNode node, int index, K key, long recid) { node._keys[index] = key; node._children[index] = recid; } /** * Copy entries between two nodes */ private static void copyEntries(BTreeNode source, int indexSource, BTreeNode dest, int indexDest, int count) { System.arraycopy(source._keys, indexSource, dest._keys, indexDest, count); System.arraycopy(source._values, indexSource, dest._values, indexDest, count); } /** * Copy child node recids between two nodes */ private static void copyChildren(BTreeNode source, int indexSource, BTreeNode dest, int indexDest, int count) { System.arraycopy(source._keys, indexSource, dest._keys, indexDest, count); System.arraycopy(source._children, indexSource, dest._children, indexDest, count); } /** * Load the node at the given recid. */ private BTreeNode loadNode(long recid) throws IOException { BTreeNode child = _btree._db.fetch(recid, this); child._recid = recid; child._btree = _btree; return child; } private final int compare(final K value1, final K value2) { if (value1 == null) { return 1; } if (value2 == null) { return -1; } if (_btree._comparator == null) { return ((Comparable) value1).compareTo(value2); } else { return _btree._comparator.compare(value1, value2); } } /** * Dump the structure of the tree on the screen. This is used for debugging * purposes only. */ private void dump(int height) { String prefix = ""; for (int i = 0; i < height; i++) { prefix += " "; } System.out.println(prefix + "-------------------------------------- BTreeNode recid=" + _recid); System.out.println(prefix + "first=" + _first); for (int i = 0; i < BTree.DEFAULT_SIZE; i++) { if (_isLeaf) { System.out.println(prefix + "BTreeNode [" + i + "] " + _keys[i] + " " + _values[i]); } else { System.out.println(prefix + "BTreeNode [" + i + "] " + _keys[i] + " " + _children[i]); } } System.out.println(prefix + "--------------------------------------"); } /** * Recursively dump the state of the BTree on screen. This is used for * debugging purposes only. 
*/ void dumpRecursive(int height, int level) throws IOException { height -= 1; level += 1; if (height > 0) { for (byte i = _first; i < BTree.DEFAULT_SIZE; i++) { if (_keys[i] == null) break; BTreeNode child = loadNode(_children[i]); child.dump(level); child.dumpRecursive(height, level); } } } /** * Deserialize the content of an object from a byte array. */ @SuppressWarnings("unchecked") public BTreeNode deserialize(DataInput ois2) throws IOException { DataInputOutput ois = (DataInputOutput) ois2; BTreeNode node = new BTreeNode(); switch (ois.readUnsignedByte()) { case SerializationHeader.BTREE_NODE_LEAF: node._isLeaf = true; break; case SerializationHeader.BTREE_NODE_NONLEAF: node._isLeaf = false; break; default: throw new InternalError("wrong BTreeNode header"); } if (node._isLeaf) { node._previous = LongPacker.unpackLong(ois); node._next = LongPacker.unpackLong(ois); } node._first = ois.readByte(); if (!node._isLeaf) { node._children = new long[BTree.DEFAULT_SIZE]; for (int i = node._first; i < BTree.DEFAULT_SIZE; i++) { node._children[i] = LongPacker.unpackLong(ois); } } if (!_btree.loadValues) return node; try { node._keys = readKeys(ois, node._first); } catch (ClassNotFoundException except) { throw new IOException(except.getMessage()); } if (node._isLeaf) { try { readValues(ois, node); } catch (ClassNotFoundException except) { throw new IOException(except); } } return node; } /** * Serialize the content of an object into a byte array. * * @param obj Object to serialize * @return a byte array representing the object's state */ public void serialize(DataOutput oos, BTreeNode obj) throws IOException { // note: It is assumed that BTreeNode instance doing the serialization is the parent // of the BTreeNode object being serialized. BTreeNode node = obj; oos.writeByte(node._isLeaf ? SerializationHeader.BTREE_NODE_LEAF : SerializationHeader.BTREE_NODE_NONLEAF); if (node._isLeaf) { LongPacker.packLong(oos, node._previous); LongPacker.packLong(oos, node._next); } oos.write(node._first); if (!node._isLeaf) { for (int i = node._first; i < BTree.DEFAULT_SIZE; i++) { LongPacker.packLong(oos, node._children[i]); } } writeKeys(oos, node._keys, node._first); if (node._isLeaf && _btree.hasValues()) { writeValues(oos, node); } } private void readValues(DataInputOutput ois, BTreeNode node) throws IOException, ClassNotFoundException { node._values = new Object[BTree.DEFAULT_SIZE]; if(_btree.hasValues()){ Serializer serializer = _btree.valueSerializer != null ? _btree.valueSerializer : (Serializer) _btree.getRecordManager().defaultSerializer(); for (int i = node._first; i < BTree.DEFAULT_SIZE; i++) { int header = ois.readUnsignedByte(); if (header == BTreeLazyRecord.NULL) { node._values[i] = null; } else if (header == BTreeLazyRecord.LAZY_RECORD) { long recid = LongPacker.unpackLong(ois); node._values[i] = new BTreeLazyRecord(_btree._db, recid, serializer); } else { node._values[i] = BTreeLazyRecord.fastDeser(ois, serializer, header); } } }else{ //create fake values for (int i = node._first; i < BTree.DEFAULT_SIZE; i++) { if(node._keys[i]!=null) node._values[i] = Utils.EMPTY_STRING; } } } private void writeValues(DataOutput oos, BTreeNode node) throws IOException { DataInputOutput output = null; Serializer serializer = _btree.valueSerializer != null ? 
_btree.valueSerializer : _btree.getRecordManager().defaultSerializer(); for (int i = node._first; i < BTree.DEFAULT_SIZE; i++) { if (node._values[i] instanceof BTreeLazyRecord) { oos.write(BTreeLazyRecord.LAZY_RECORD); LongPacker.packLong(oos, ((BTreeLazyRecord) node._values[i]).recid); } else if (node._values[i] != null) { if (output == null) { output = new DataInputOutput(); } else { output.reset(); } serializer.serialize(output, node._values[i]); if (output.getPos() > BTreeLazyRecord.MAX_INTREE_RECORD_SIZE) { //write as separate record long recid = _btree._db.insert(output.toByteArray(), BTreeLazyRecord.FAKE_SERIALIZER,true); oos.write(BTreeLazyRecord.LAZY_RECORD); LongPacker.packLong(oos, recid); } else { //write as part of btree oos.write(output.getPos()); oos.write(output.getBuf(), 0, output.getPos()); } } else { oos.write(BTreeLazyRecord.NULL); } } } private static final int ALL_NULL = 0; private static final int ALL_INTEGERS = 1 << 5; private static final int ALL_INTEGERS_NEGATIVE = 2 << 5; private static final int ALL_LONGS = 3 << 5; private static final int ALL_LONGS_NEGATIVE = 4 << 5; private static final int ALL_STRINGS = 5 << 5; private static final int ALL_OTHER = 6 << 5; private K[] readKeys(DataInput ois, final int firstUse) throws IOException, ClassNotFoundException { Object[] ret = new Object[BTree.DEFAULT_SIZE]; final int type = ois.readUnsignedByte(); if (type == ALL_NULL) { return (K[]) ret; } else if (type == ALL_INTEGERS || type == ALL_INTEGERS_NEGATIVE) { long first = LongPacker.unpackLong(ois); if (type == ALL_INTEGERS_NEGATIVE) first = -first; ret[firstUse] = Integer.valueOf((int) first); for (int i = firstUse + 1; i < BTree.DEFAULT_SIZE; i++) { // ret[i] = Serialization.readObject(ois); long v = LongPacker.unpackLong(ois); if (v == 0) continue; //null v = v + first; ret[i] = Integer.valueOf((int) v); first = v; } return (K[]) ret; } else if (type == ALL_LONGS || type == ALL_LONGS_NEGATIVE) { long first = LongPacker.unpackLong(ois); if (type == ALL_LONGS_NEGATIVE) first = -first; ret[firstUse] = Long.valueOf(first); for (int i = firstUse + 1; i < BTree.DEFAULT_SIZE; i++) { //ret[i] = Serialization.readObject(ois); long v = LongPacker.unpackLong(ois); if (v == 0) continue; //null v = v + first; ret[i] = Long.valueOf(v); first = v; } return (K[]) ret; } else if (type == ALL_STRINGS) { byte[] previous = null; for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) { byte[] b = leadingValuePackRead(ois, previous, 0); if (b == null) continue; ret[i] = new String(b,Serialization.UTF8); previous = b; } return (K[]) ret; } else if (type == ALL_OTHER) { //TODO why this block is here? if (_btree.keySerializer == null || _btree.keySerializer == _btree.getRecordManager().defaultSerializer()) { for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) { ret[i] = _btree.getRecordManager().defaultSerializer().deserialize(ois); } return (K[]) ret; } Serializer ser = _btree.keySerializer != null ? 
_btree.keySerializer : _btree.getRecordManager().defaultSerializer(); DataInputOutput in2 = null; byte[] previous = null; for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) { byte[] b = leadingValuePackRead(ois, previous, 0); if (b == null) continue; if (in2 == null) { in2 = new DataInputOutput(); } in2.reset(b); ret[i] = ser.deserialize(in2); previous = b; } return (K[]) ret; } else { throw new InternalError("unknown BTreeNode header type: " + type); } } @SuppressWarnings("unchecked") private void writeKeys(DataOutput oos, K[] keys, final int firstUse) throws IOException { if (keys.length != BTree.DEFAULT_SIZE) throw new IllegalArgumentException("wrong keys size"); //check if all items on key are null boolean allNull = true; for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) { if (keys[i] != null) { allNull = false; break; } } if (allNull) { oos.write(ALL_NULL); return; } /** * Special compression to compress Long and Integer */ if ((_btree._comparator == Utils.COMPARABLE_COMPARATOR || _btree._comparator == null) && (_btree.keySerializer == null || _btree.keySerializer == _btree.getRecordManager().defaultSerializer())) { boolean allInteger = true; for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) { if (keys[i] != null && keys[i].getClass() != Integer.class) { allInteger = false; break; } } boolean allLong = true; for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) { if (keys[i] != null && (keys[i].getClass() != Long.class || //special case to exclude Long.MIN_VALUE from conversion, causes problems to LongPacker ((Long) keys[i]).longValue() == Long.MIN_VALUE) ) { allLong = false; break; } } if (allLong) { //check that diff between MIN and MAX fits into PACKED_LONG long max = Long.MIN_VALUE; long min = Long.MAX_VALUE; for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) { if (keys[i] == null) continue; long v = (Long) keys[i]; if (v > max) max = v; if (v < min) min = v; } //now convert to Double to prevent overflow errors double max2 = max; double min2 = min; double maxDiff = Long.MAX_VALUE; if (max2 - min2 > maxDiff / 2) // divide by two just to by sure allLong = false; } if (allLong && allInteger) throw new InternalError(); if (allLong || allInteger) { long first = ((Number) keys[firstUse]).longValue(); //write header if (allInteger) { if (first > 0) oos.write(ALL_INTEGERS); else oos.write(ALL_INTEGERS_NEGATIVE); } else if (allLong) { if (first > 0) oos.write(ALL_LONGS); else oos.write(ALL_LONGS_NEGATIVE); } else { throw new InternalError(); } //write first LongPacker.packLong(oos, Math.abs(first)); //write others for (int i = firstUse + 1; i < BTree.DEFAULT_SIZE; i++) { // Serialization.writeObject(oos, keys[i]); if (keys[i] == null) LongPacker.packLong(oos, 0); else { long v = ((Number) keys[i]).longValue(); if (v <= first) throw new InternalError("not ordered"); LongPacker.packLong(oos, v - first); first = v; } } return; } else { //another special case for Strings boolean allString = true; for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) { if (keys[i] != null && (keys[i].getClass() != String.class) ) { allString = false; break; } } if (allString) { oos.write(ALL_STRINGS); byte[] previous = null; for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) { if (keys[i] == null) { leadingValuePackWrite(oos, null, previous, 0); } else { byte[] b = ((String) keys[i]).getBytes(Serialization.UTF8); leadingValuePackWrite(oos, b, previous, 0); previous = b; } } return; } } } /** * other case, serializer is provided or other stuff */ oos.write(ALL_OTHER); if (_btree.keySerializer == null || 
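// Worked illustration of the numeric key compression handled above (a sketch, not part
// of the original comments): for keys {10, 12, 15, null, ...} starting at firstUse,
// writeKeys emits the ALL_INTEGERS header, packLong(10) for the first key, then the
// deltas packLong(2) and packLong(3); a null key is written as packLong(0). readKeys
// reverses this by adding each delta to the previously decoded value.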
_btree.keySerializer == _btree.getRecordManager().defaultSerializer()) { for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) { _btree.getRecordManager().defaultSerializer().serialize(oos, keys[i]); } return; } //custom serializer is provided, use it Serializer ser = _btree.keySerializer; byte[] previous = null; DataInputOutput out3 = new DataInputOutput(); for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) { if (keys[i] == null) { leadingValuePackWrite(oos, null, previous, 0); } else { out3.reset(); ser.serialize(out3, keys[i]); byte[] b = out3.toByteArray(); leadingValuePackWrite(oos, b, previous, 0); previous = b; } } } public void defrag(DBStore r1, DBStore r2) throws IOException { if (_children != null) for (long child : _children) { if (child == 0) continue; byte[] data = r1.fetchRaw(child); r2.forceInsert(child, data); BTreeNode t = deserialize(new DataInputOutput(data)); t._btree = _btree; t.defrag(r1, r2); } } /** * STATIC INNER CLASS * Result from insert() method call */ static final class InsertResult { /** * Overflow node. */ BTreeNode _overflow; /** * Existing value for the insertion key. */ V _existing; } /** * STATIC INNER CLASS * Result from remove() method call */ static final class RemoveResult { /** * Set to true if underlying nodes underflowed */ boolean _underflow; /** * Removed entry value */ V _value; } /** * PRIVATE INNER CLASS * Browser to traverse leaf nodes. */ static final class Browser implements BTree.BTreeTupleBrowser { /** * Current node. */ private BTreeNode _node; /** * Current index in the node. The index positionned on the next * tuple to return. */ private byte _index; private int expectedModCount; /** * Create a browser. * * @param node Current node * @param index Position of the next tuple to return. */ Browser(BTreeNode node, byte index) { _node = node; _index = index; expectedModCount = node._btree.modCount; } public boolean getNext(BTree.BTreeTuple tuple) throws IOException { if (expectedModCount != _node._btree.modCount) throw new ConcurrentModificationException(); if (_node == null) { //last record in iterator was deleted, so iterator is at end of node return false; } if (_index < BTree.DEFAULT_SIZE) { if (_node._keys[_index] == null) { // reached end of the tree. 
return false; } } else if (_node._next != 0) { // move to next node _node = _node.loadNode(_node._next); _index = _node._first; } tuple.key = _node._keys[_index]; if (_node._values[_index] instanceof BTreeLazyRecord) tuple.value = ((BTreeLazyRecord) _node._values[_index]).get(); else tuple.value = (V) _node._values[_index]; _index++; return true; } public boolean getPrevious(BTree.BTreeTuple tuple) throws IOException { if (expectedModCount != _node._btree.modCount) throw new ConcurrentModificationException(); if (_node == null) { //deleted last record, but this situation is only supportedd on getNext throw new InternalError(); } if (_index == _node._first) { if (_node._previous != 0) { _node = _node.loadNode(_node._previous); _index = BTree.DEFAULT_SIZE; } else { // reached beginning of the tree return false; } } _index--; tuple.key = _node._keys[_index]; if (_node._values[_index] instanceof BTreeLazyRecord) tuple.value = ((BTreeLazyRecord) _node._values[_index]).get(); else tuple.value = (V) _node._values[_index]; return true; } public void remove(K key) throws IOException { if (expectedModCount != _node._btree.modCount) throw new ConcurrentModificationException(); _node._btree.remove(key); expectedModCount++; //An entry was removed and this may trigger tree rebalance, //This would change current node layout, so find our position again BTree.BTreeTupleBrowser b = _node._btree.browse(key,true); //browser is positioned just before value which was currently deleted, so find if we have new value if (b.getNext(new BTree.BTreeTuple(null, null))) { //next value value exists, copy its state Browser b2 = (Browser) b; this._node = b2._node; this._index = b2._index; } else { this._node = null; this._index = -1; } } } /** * Used for debugging and testing only. Recursively obtains the recids of * all child BTreeNodes and adds them to the 'out' list. * * @param out * @param height * @throws IOException */ void dumpChildNodeRecIDs(List out, int height) throws IOException { height -= 1; if (height > 0) { for (byte i = _first; i < BTree.DEFAULT_SIZE; i++) { if (_children[i] == 0) continue; BTreeNode child = loadNode(_children[i]); out.add(new Long(child._recid)); child.dumpChildNodeRecIDs(out, height); } } } /** * Read previously written data * * @author Kevin Day */ static byte[] leadingValuePackRead(DataInput in, byte[] previous, int ignoreLeadingCount) throws IOException { int len = LongPacker.unpackInt(in) - 1; // 0 indicates null if (len == -1) return null; int actualCommon = LongPacker.unpackInt(in); byte[] buf = new byte[len]; if (previous == null) { actualCommon = 0; } if (actualCommon > 0) { in.readFully(buf, 0, ignoreLeadingCount); System.arraycopy(previous, ignoreLeadingCount, buf, ignoreLeadingCount, actualCommon - ignoreLeadingCount); } in.readFully(buf, actualCommon, len - actualCommon); return buf; } /** * This method is used for delta compression for keys. * Writes the contents of buf to the DataOutput out, with special encoding if * there are common leading bytes in the previous group stored by this compressor. * * @author Kevin Day */ static void leadingValuePackWrite(DataOutput out, byte[] buf, byte[] previous, int ignoreLeadingCount) throws IOException { if (buf == null) { LongPacker.packInt(out, 0); return; } int actualCommon = ignoreLeadingCount; if (previous != null) { int maxCommon = buf.length > previous.length ? 
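// Illustrative trace of this method (a sketch using hypothetical byte strings): writing
// "apply" right after previous = "apple" finds 4 common leading bytes, so the output is
// packInt(5 + 1), packInt(4) and the single trailing byte 'y'; leadingValuePackRead()
// then rebuilds the value by copying the shared prefix from 'previous' and reading only
// the remaining byte.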
previous.length : buf.length; if (maxCommon > Short.MAX_VALUE) maxCommon = Short.MAX_VALUE; for (; actualCommon < maxCommon; actualCommon++) { if (buf[actualCommon] != previous[actualCommon]) break; } } // there are enough common bytes to justify compression LongPacker.packInt(out, buf.length + 1);// store as +1, 0 indicates null LongPacker.packInt(out, actualCommon); out.write(buf, 0, ignoreLeadingCount); out.write(buf, actualCommon, buf.length - actualCommon); } BTreeNode loadLastChildNode() throws IOException { return loadNode(_children[BTree.DEFAULT_SIZE - 1]); } } ================================================ FILE: src/main/java/org/apache/jdbm/BTreeSet.java ================================================ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.jdbm; import java.util.*; /** * Wrapper class for >SortedMap to implement >NavigableSet *
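 * A usage sketch (hypothetical names; such a set is normally obtained from DB.createTreeSet):
 *   NavigableSet set = db.createTreeSet("names");
 *   set.add("bob");             // delegates to map.put("bob", EMPTY_STRING)
 *   set.headSet("m", false);    // view backed by map.headMap("m", false)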

* This code originally comes from Apache Harmony, was adapted by Jan Kotek for JDBM */ class BTreeSet extends AbstractSet implements NavigableSet { /** * use keyset from this map */ final BTreeMap map; BTreeSet(BTreeMap map) { this.map = map; } public boolean add(E object) { return map.put(object, Utils.EMPTY_STRING) == null; } public boolean addAll(Collection collection) { return super.addAll(collection); } public void clear() { map.clear(); } public Comparator comparator() { return map.comparator(); } public boolean contains(Object object) { return map.containsKey(object); } public boolean isEmpty() { return map.isEmpty(); } public E lower(E e) { return map.lowerKey(e); } public E floor(E e) { return map.floorKey(e); } public E ceiling(E e) { return map.ceilingKey(e); } public E higher(E e) { return map.higherKey(e); } public E pollFirst() { Map.Entry e = map.pollFirstEntry(); return e!=null? e.getKey():null; } public E pollLast() { Map.Entry e = map.pollLastEntry(); return e!=null? e.getKey():null; } public Iterator iterator() { final Iterator> iter = map.entrySet().iterator(); return new Iterator() { public boolean hasNext() { return iter.hasNext(); } public E next() { Map.Entry e = iter.next(); return e!=null?e.getKey():null; } public void remove() { iter.remove(); } }; } public NavigableSet descendingSet() { return map.descendingKeySet(); } public Iterator descendingIterator() { return map.descendingKeySet().iterator(); } public NavigableSet subSet(E fromElement, boolean fromInclusive, E toElement, boolean toInclusive) { return map.subMap(fromElement,fromInclusive,toElement,toInclusive).navigableKeySet(); } public NavigableSet headSet(E toElement, boolean inclusive) { return map.headMap(toElement,inclusive).navigableKeySet(); } public NavigableSet tailSet(E fromElement, boolean inclusive) { return map.tailMap(fromElement,inclusive).navigableKeySet(); } public boolean remove(Object object) { return map.remove(object) != null; } public int size() { return map.size(); } public E first() { return map.firstKey(); } public E last() { return map.lastKey(); } public SortedSet subSet(E start, E end) { Comparator c = map.comparator(); int compare = (c == null) ? ((Comparable) start).compareTo(end) : c .compare(start, end); if (compare <= 0) { return new BTreeSet((BTreeMap) map.subMap(start, true,end,false)); } throw new IllegalArgumentException(); } public SortedSet headSet(E end) { // Check for errors Comparator c = map.comparator(); if (c == null) { ((Comparable) end).compareTo(end); } else { c.compare(end, end); } return new BTreeSet((BTreeMap) map.headMap(end,false)); } public SortedSet tailSet(E start) { // Check for errors Comparator c = map.comparator(); if (c == null) { ((Comparable) start).compareTo(start); } else { c.compare(start, start); } return new BTreeSet((BTreeMap) map.tailMap(start,true)); } } ================================================ FILE: src/main/java/org/apache/jdbm/DB.java ================================================ package org.apache.jdbm; import java.util.*; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentNavigableMap; /** * Database is root class for creating and loading persistent collections. It also contains * transaction operations. * //TODO just write some readme *

 * * @author Jan Kotek * @author Alex Boisvert * @author Cees de Groot */ public interface DB { /** * Closes the DB and releases resources. * The DB cannot be used after it has been closed. */ void close(); /** @return true if db was already closed */ boolean isClosed(); /** * Clears the cache and removes all entries it contains. * This may be useful for garbage collection when a reference cache is used. */ void clearCache(); /** * Defragments storage so it consumes less space. * It basically copies all records into a different store and then renames it, replacing the original store. *

 * Defrag has two steps: In the first, collections are rearranged so that records belonging to the same collection are close to each other, * which improves read speed. In the second step, all records are transferred sequentially, reclaiming all unused space. * The first step is optional and may slow down defragmentation significantly, as it requires many random-access reads. * The second step reads and writes data sequentially and is very fast, comparable to copying files to a new location. * *
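 * A minimal usage sketch (hypothetical, assuming a DB instance 'db'):
 *   db.defrag(true);    // rearrange collection records first, then compact the store
 *   db.defrag(false);   // skip the rearrangement step and only reclaim unused space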

* This commits any uncommitted data. Defrag also requires free space, as the store is basically recreated at a new location. * * @param sortCollections if collection records should be rearranged during defragmentation; this takes some extra time */ void defrag(boolean sortCollections); /** * Commit (make persistent) all changes since the beginning of the transaction. * JDBM supports only a single transaction. */ void commit(); /** * Rollback (cancel) all changes since the beginning of the transaction. * JDBM supports only a single transaction. * This operation affects all maps created or loaded by this DB. */ void rollback(); /** * This calculates some database statistics such as collection sizes and record distributions. * Can be useful for performance optimization and troubleshooting. * This method can run for a very long time. * * @return statistics contained in a string */ String calculateStatistics(); /** * Copy database content into a ZIP file * @param zipFile */ void copyToZip(String zipFile); /** * Get a Map which was already created and saved in DB. * This map uses a disk-based H*Tree and should have similar performance * as HashMap. * * @param name of hash map * * @return map */ ConcurrentMap getHashMap(String name); /** * Creates a Map which persists data into DB. * * @param name record name * @return */ ConcurrentMap createHashMap(String name); /** * Creates a Hash Map which persists data into DB. * The map will use custom serializers for keys and values. * Leave keySerializer null to use the default serializer for keys. * * @param Key type * @param Value type * @param name record name * @param keySerializer serializer to be used for keys, leave null to use the default serializer * @param valueSerializer serializer to be used for values * @return */ ConcurrentMap createHashMap(String name, Serializer keySerializer, Serializer valueSerializer); Set createHashSet(String name); Set getHashSet(String name); Set createHashSet(String name, Serializer keySerializer); ConcurrentNavigableMap getTreeMap(String name); /** * Creates a TreeMap which persists data into DB. * * @param Key type * @param Value type * @param name record name * @return */ NavigableMap createTreeMap(String name); /** * Creates a TreeMap which persists data into DB. * * @param Key type * @param Value type * @param name record name * @param keyComparator Comparator used to sort keys * @param keySerializer Serializer used for keys. This may reduce disk space usage * * @param valueSerializer Serializer used for values. This may reduce disk space usage * @return */ ConcurrentNavigableMap createTreeMap(String name, Comparator keyComparator, Serializer keySerializer, Serializer valueSerializer); NavigableSet getTreeSet(String name); NavigableSet createTreeSet(String name); NavigableSet createTreeSet(String name, Comparator keyComparator, Serializer keySerializer); List createLinkedList(String name); List createLinkedList(String name, Serializer serializer); List getLinkedList(String name); /** Returns an unmodifiable map which contains all collection names and the collections themselves. */ Map getCollections(); /** Completely removes a collection from the store. */ void deleteCollection(String name); /** Java Collections return their size as an int. This may not be enough for JDBM collections. * This method returns the number of elements in a JDBM collection as a long.
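 * A hypothetical example (names assumed):
 *   ConcurrentMap users = db.getHashMap("users");
 *   long n = db.collectionSize(users);   // long-valued size, unlike Map.size()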
* * @param collection created by JDBM * @return number of elements in collection as long */ long collectionSize(Object collection); } ================================================ FILE: src/main/java/org/apache/jdbm/DBAbstract.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.DataInput; import java.io.DataOutput; import java.io.IOError; import java.io.IOException; import java.lang.ref.WeakReference; import java.util.*; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentNavigableMap; /** * An abstract class implementing most of DB. * It also has some JDBM package protected stuff (getNamedRecord) */ abstract class DBAbstract implements DB { /** * Reserved slot for name directory recid. */ static final byte NAME_DIRECTORY_ROOT = 0; /** * Reserved slot for version number */ static final byte STORE_VERSION_NUMBER_ROOT = 1; /** * Reserved slot for recid where Serial class info is stored * * NOTE when introducing more roots, do not forget to update defrag */ static final byte SERIAL_CLASS_INFO_RECID_ROOT = 2; /** to prevent double instances of the same collection, we use weak value map * * //TODO what to do when there is rollback? * //TODO clear on close */ final private Map> collections = new HashMap>(); /** * Inserts a new record using a custom serializer. * * @param obj the object for the new record. * @param serializer a custom serializer * @return the rowid for the new record. * @throws java.io.IOException when one of the underlying I/O operations fails. */ abstract long insert(A obj, Serializer serializer,boolean disableCache) throws IOException; /** * Deletes a record. * * @param recid the rowid for the record that should be deleted. * @throws java.io.IOException when one of the underlying I/O operations fails. */ abstract void delete(long recid) throws IOException; /** * Updates a record using a custom serializer. * If given recid does not exist, IOException will be thrown before/during commit (cache). * * @param recid the recid for the record that is to be updated. * @param obj the new object for the record. * @param serializer a custom serializer * @throws java.io.IOException when one of the underlying I/O operations fails */ abstract void update(long recid, A obj, Serializer serializer) throws IOException; /** * Fetches a record using a custom serializer. * * @param recid the recid for the record that must be fetched. * @param serializer a custom serializer * @return the object contained in the record, null if given recid does not exist * @throws java.io.IOException when one of the underlying I/O operations fails. 
*/ abstract A fetch(long recid, Serializer serializer) throws IOException; /** * Fetches a record using a custom serializer and optionaly disabled cache * * @param recid the recid for the record that must be fetched. * @param serializer a custom serializer * @param disableCache true to disable any caching mechanism * @return the object contained in the record, null if given recid does not exist * @throws java.io.IOException when one of the underlying I/O operations fails. */ abstract A fetch(long recid, Serializer serializer, boolean disableCache) throws IOException; public long insert(Object obj) throws IOException { return insert(obj, defaultSerializer(),false); } public void update(long recid, Object obj) throws IOException { update(recid, obj, defaultSerializer()); } synchronized public A fetch(long recid) throws IOException { return (A) fetch(recid, defaultSerializer()); } synchronized public ConcurrentMap getHashMap(String name) { Object o = getCollectionInstance(name); if(o!=null) return (ConcurrentMap) o; try { long recid = getNamedObject(name); if(recid == 0) return null; HTree tree = fetch(recid); tree.setPersistenceContext(this); if(!tree.hasValues()){ throw new ClassCastException("HashSet is not HashMap"); } collections.put(name,new WeakReference(tree)); return tree; } catch (IOException e) { throw new IOError(e); } } synchronized public ConcurrentMap createHashMap(String name) { return createHashMap(name, null, null); } public synchronized ConcurrentMap createHashMap(String name, Serializer keySerializer, Serializer valueSerializer) { try { assertNameNotExist(name); HTree tree = new HTree(this, keySerializer, valueSerializer,true); long recid = insert(tree); setNamedObject(name, recid); collections.put(name,new WeakReference(tree)); return tree; } catch (IOException e) { throw new IOError(e); } } public synchronized Set getHashSet(String name) { Object o = getCollectionInstance(name); if(o!=null) return (Set) o; try { long recid = getNamedObject(name); if(recid == 0) return null; HTree tree = fetch(recid); tree.setPersistenceContext(this); if(tree.hasValues()){ throw new ClassCastException("HashMap is not HashSet"); } Set ret = new HTreeSet(tree); collections.put(name,new WeakReference(ret)); return ret; } catch (IOException e) { throw new IOError(e); } } public synchronized Set createHashSet(String name) { return createHashSet(name, null); } public synchronized Set createHashSet(String name, Serializer keySerializer) { try { assertNameNotExist(name); HTree tree = new HTree(this, keySerializer, null,false); long recid = insert(tree); setNamedObject(name, recid); Set ret = new HTreeSet(tree); collections.put(name,new WeakReference(ret)); return ret; } catch (IOException e) { throw new IOError(e); } } synchronized public ConcurrentNavigableMap getTreeMap(String name) { Object o = getCollectionInstance(name); if(o!=null) return (ConcurrentNavigableMap ) o; try { long recid = getNamedObject(name); if(recid == 0) return null; BTree t = BTree.load(this, recid); if(!t.hasValues()) throw new ClassCastException("TreeSet is not TreeMap"); ConcurrentNavigableMap ret = new BTreeMap(t,false); //TODO put readonly flag here collections.put(name,new WeakReference(ret)); return ret; } catch (IOException e) { throw new IOError(e); } } synchronized public ConcurrentNavigableMap createTreeMap(String name) { return createTreeMap(name, null, null, null); } public synchronized ConcurrentNavigableMap createTreeMap(String name, Comparator keyComparator, Serializer keySerializer, Serializer 
valueSerializer) { try { assertNameNotExist(name); BTree tree = BTree.createInstance(this, keyComparator, keySerializer, valueSerializer,true); setNamedObject(name, tree.getRecid()); ConcurrentNavigableMap ret = new BTreeMap(tree,false); //TODO put readonly flag here collections.put(name,new WeakReference(ret)); return ret; } catch (IOException e) { throw new IOError(e); } } public synchronized NavigableSet getTreeSet(String name) { Object o = getCollectionInstance(name); if(o!=null) return (NavigableSet ) o; try { long recid = getNamedObject(name); if(recid == 0) return null; BTree t = BTree.load(this, recid); if(t.hasValues()) throw new ClassCastException("TreeMap is not TreeSet"); BTreeSet ret = new BTreeSet(new BTreeMap(t,false)); collections.put(name,new WeakReference(ret)); return ret; } catch (IOException e) { throw new IOError(e); } } public synchronized NavigableSet createTreeSet(String name) { return createTreeSet(name, null, null); } public synchronized NavigableSet createTreeSet(String name, Comparator keyComparator, Serializer keySerializer) { try { assertNameNotExist(name); BTree tree = BTree.createInstance(this, keyComparator, keySerializer, null,false); setNamedObject(name, tree.getRecid()); BTreeSet ret = new BTreeSet(new BTreeMap(tree,false)); collections.put(name,new WeakReference(ret)); return ret; } catch (IOException e) { throw new IOError(e); } } synchronized public List createLinkedList(String name) { return createLinkedList(name, null); } synchronized public List createLinkedList(String name, Serializer serializer) { try { assertNameNotExist(name); //allocate record and overwrite it LinkedList2 list = new LinkedList2(this, serializer); long recid = insert(list); setNamedObject(name, recid); collections.put(name,new WeakReference(list)); return list; } catch (IOException e) { throw new IOError(e); } } synchronized public List getLinkedList(String name) { Object o = getCollectionInstance(name); if(o!=null) return (List ) o; try { long recid = getNamedObject(name); if(recid == 0) return null; LinkedList2 list = (LinkedList2) fetch(recid); list.setPersistenceContext(this); collections.put(name,new WeakReference(list)); return list; } catch (IOException e) { throw new IOError(e); } } private synchronized Object getCollectionInstance(String name){ WeakReference ref = collections.get(name); if(ref==null)return null; Object o = ref.get(); if(o != null) return o; //already GCed collections.remove(name); return null; } private void assertNameNotExist(String name) throws IOException { if (getNamedObject(name) != 0) throw new IllegalArgumentException("Object with name '" + name + "' already exists"); } /** * Obtain the record id of a named object. Returns 0 if named object * doesn't exist. * Named objects are used to store Map views and other well known objects. */ synchronized protected long getNamedObject(String name) throws IOException{ long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT); if(nameDirectory_recid == 0){ return 0; } HTree m = fetch(nameDirectory_recid); Long res = m.get(name); if(res == null) return 0; return res; } /** * Set the record id of a named object. * Named objects are used to store Map views and other well known objects. 
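 * A sketch of the flow implemented below: the name directory is an HTree kept under the
 * NAME_DIRECTORY_ROOT root slot and created lazily on the first call, so that
 *   setNamedObject("users", recid)   // stores dir.put("users", recid)
 *   getNamedObject("users")          // returns that recid, or 0 when the name is unknown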
*/ synchronized protected void setNamedObject(String name, long recid) throws IOException{ long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT); HTree m = null; if(nameDirectory_recid == 0){ //does not exists, create it m = new HTree(this,null,null,true); nameDirectory_recid = insert(m); setRoot(NAME_DIRECTORY_ROOT,nameDirectory_recid); }else{ //fetch it m = fetch(nameDirectory_recid); } m.put(name,recid); } synchronized public Map getCollections(){ try{ Map ret = new LinkedHashMap(); long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT); if(nameDirectory_recid==0) return ret; HTree m = fetch(nameDirectory_recid); for(Map.Entry e:m.entrySet()){ Object o = fetch(e.getValue()); if(o instanceof BTree){ if(((BTree) o).hasValues) o = getTreeMap(e.getKey()); else o = getTreeSet(e.getKey()); } else if( o instanceof HTree){ if(((HTree) o).hasValues) o = getHashMap(e.getKey()); else o = getHashSet(e.getKey()); } ret.put(e.getKey(), o); } return Collections.unmodifiableMap(ret); }catch(IOException e){ throw new IOError(e); } } synchronized public void deleteCollection(String name){ try{ long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT); if(nameDirectory_recid==0) throw new IOException("Collection not found"); HTree dir = fetch(nameDirectory_recid); Long recid = dir.get(name); if(recid == null) throw new IOException("Collection not found"); Object o = fetch(recid); //we can not use O instance since it is not correctly initialized if(o instanceof LinkedList2){ LinkedList2 l = (LinkedList2) o; l.clear(); delete(l.rootRecid); }else if(o instanceof BTree){ ((BTree) o).clear(); } else if( o instanceof HTree){ HTree t = (HTree) o; t.clear(); HTreeDirectory n = (HTreeDirectory) fetch(t.rootRecid,t.SERIALIZER); n.deleteAllChildren(); delete(t.rootRecid); }else{ throw new InternalError("unknown collection type: "+(o==null?null:o.getClass())); } delete(recid); collections.remove(name); dir.remove(name); }catch(IOException e){ throw new IOError(e); } } /** we need to set reference to this DB instance, so serializer needs to be here*/ final Serializer defaultSerializationSerializer = new Serializer(){ public void serialize(DataOutput out, Serialization obj) throws IOException { LongPacker.packLong(out,obj.serialClassInfoRecid); SerialClassInfo.serializer.serialize(out,obj.registered); } public Serialization deserialize(DataInput in) throws IOException, ClassNotFoundException { final long recid = LongPacker.unpackLong(in); final ArrayList classes = SerialClassInfo.serializer.deserialize(in); return new Serialization(DBAbstract.this,recid,classes); } }; public synchronized Serializer defaultSerializer() { try{ long serialClassInfoRecid = getRoot(SERIAL_CLASS_INFO_RECID_ROOT); if (serialClassInfoRecid == 0) { //allocate new recid serialClassInfoRecid = insert(null,Utils.NULL_SERIALIZER,false); //and insert new serializer Serialization ser = new Serialization(this,serialClassInfoRecid,new ArrayList()); update(serialClassInfoRecid,ser, defaultSerializationSerializer); setRoot(SERIAL_CLASS_INFO_RECID_ROOT, serialClassInfoRecid); return ser; }else{ return fetch(serialClassInfoRecid,defaultSerializationSerializer); } } catch (IOException e) { throw new IOError(e); } } final protected void checkNotClosed(){ if(isClosed()) throw new IllegalStateException("db was closed"); } protected abstract void setRoot(byte root, long recid); protected abstract long getRoot(byte root); synchronized public long collectionSize(Object collection){ if(collection instanceof BTreeMap){ BTreeMap t = (BTreeMap) collection; 
if(t.fromKey!=null|| t.toKey!=null) throw new IllegalArgumentException("collectionSize does not work on BTree submap"); return t.tree._entries; }else if(collection instanceof HTree){ return ((HTree)collection).getRoot().size; }else if(collection instanceof HTreeSet){ return collectionSize(((HTreeSet) collection).map); }else if(collection instanceof BTreeSet){ return collectionSize(((BTreeSet) collection).map); }else if(collection instanceof LinkedList2){ return ((LinkedList2)collection).getRoot().size; }else{ throw new IllegalArgumentException("Not JDBM collection"); } } void addShutdownHook(){ if(shutdownCloseThread!=null){ shutdownCloseThread = new ShutdownCloseThread(); Runtime.getRuntime().addShutdownHook(shutdownCloseThread); } } public void close(){ if(shutdownCloseThread!=null){ Runtime.getRuntime().removeShutdownHook(shutdownCloseThread); shutdownCloseThread.dbToClose = null; shutdownCloseThread = null; } } ShutdownCloseThread shutdownCloseThread = null; private static class ShutdownCloseThread extends Thread{ DBAbstract dbToClose = null; ShutdownCloseThread(){ super("JDBM shutdown"); } public void run(){ if(dbToClose!=null && !dbToClose.isClosed()){ dbToClose.shutdownCloseThread = null; dbToClose.close(); } } } synchronized public void rollback() { try { for(WeakReference o:collections.values()){ Object c = o.get(); if(c != null && c instanceof BTreeMap){ //reload tree BTreeMap m = (BTreeMap) c; m.tree = fetch(m.tree.getRecid()); } if(c != null && c instanceof BTreeSet){ //reload tree BTreeSet m = (BTreeSet) c; m.map.tree = fetch(m.map.tree.getRecid()); } } } catch (IOException e) { throw new IOError(e); } } } ================================================ FILE: src/main/java/org/apache/jdbm/DBCache.java ================================================ package org.apache.jdbm; import javax.crypto.Cipher; import java.io.IOError; import java.io.IOException; import java.util.Comparator; import java.util.Iterator; /** * Abstract class with common cache functionality */ abstract class DBCache extends DBStore{ static final int NUM_OF_DIRTY_RECORDS_BEFORE_AUTOCOMIT = 1024; static final byte NONE = 1; static final byte MRU = 2; static final byte WEAK = 3; static final byte SOFT = 4; static final byte HARD = 5; static final class DirtyCacheEntry { long _recid; //TODO recid is already part of _hashDirties, so this field could be removed to save memory Object _obj; Serializer _serializer; } /** * Dirty status of _hash CacheEntry Values */ final protected LongHashMap _hashDirties = new LongHashMap(); private Serializer cachedDefaultSerializer = null; /** * Construct a CacheRecordManager wrapping another DB and * using a given cache policy. 
*/ public DBCache(String filename, boolean readonly, boolean transactionDisabled, Cipher cipherIn, Cipher cipherOut, boolean useRandomAccessFile, boolean deleteFilesAfterClose,boolean lockingDisabled){ super(filename, readonly, transactionDisabled, cipherIn, cipherOut, useRandomAccessFile, deleteFilesAfterClose,lockingDisabled); } @Override public synchronized Serializer defaultSerializer(){ if(cachedDefaultSerializer==null) cachedDefaultSerializer = super.defaultSerializer(); return cachedDefaultSerializer; } @Override boolean needsAutoCommit() { return super.needsAutoCommit()|| (transactionsDisabled && !commitInProgress && _hashDirties.size() > NUM_OF_DIRTY_RECORDS_BEFORE_AUTOCOMIT); } public synchronized long insert(final A obj, final Serializer serializer, final boolean disableCache) throws IOException { checkNotClosed(); if(super.needsAutoCommit()) commit(); if(disableCache) return super.insert(obj, serializer, disableCache); //prealocate recid so we have something to return final long recid = super.insert(PREALOCATE_OBJ, null, disableCache); // super.update(recid, obj,serializer); // return super.insert(obj,serializer,disableCache); //and create new dirty record for future update final DirtyCacheEntry e = new DirtyCacheEntry(); e._recid = recid; e._obj = obj; e._serializer = serializer; _hashDirties.put(recid,e); return recid; } public synchronized void commit() { try{ commitInProgress = true; updateCacheEntries(); super.commit(); }finally { commitInProgress = false; } } public synchronized void rollback(){ cachedDefaultSerializer = null; _hashDirties.clear(); super.rollback(); } private static final Comparator DIRTY_COMPARATOR = new Comparator() { final public int compare(DirtyCacheEntry o1, DirtyCacheEntry o2) { return (int) (o1._recid - o2._recid); } }; /** * Update all dirty cache objects to the underlying DB. */ protected void updateCacheEntries() { try { synchronized(_hashDirties){ while(!_hashDirties.isEmpty()){ //make defensive copy of values as _db.update() may trigger changes in db //and this would modify dirties again DirtyCacheEntry[] vals = new DirtyCacheEntry[_hashDirties.size()]; Iterator iter = _hashDirties.valuesIterator(); for(int i = 0;i _hash; /** * Maximum number of objects in the cache. */ protected int _max; /** * Beginning of linked-list of cache elements. First entry is element * which has been used least recently. */ protected CacheEntry _first; /** * End of linked-list of cache elements. Last entry is element * which has been used most recently. */ protected CacheEntry _last; /** * Construct a CacheRecordManager wrapping another DB and * using a given cache policy. 
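*
* The insert/update/fetch operations of these cache classes accept an optional per-record
* Serializer, following the same serialize(DataOutput, obj)/deserialize(DataInput) contract
* used by defaultSerializationSerializer above. For illustration, a minimal custom
* Serializer sketch (java.awt.Point is used only as a convenient value type):
*
*   Serializer pointSerializer = new Serializer() {
*       public void serialize(DataOutput out, Object obj) throws IOException {
*           java.awt.Point p = (java.awt.Point) obj;
*           out.writeInt(p.x);                       // two fixed-width ints per record
*           out.writeInt(p.y);
*       }
*       public Object deserialize(DataInput in) throws IOException {
*           return new java.awt.Point(in.readInt(), in.readInt());
*       }
*   };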
*/ public DBCacheMRU(String filename, boolean readonly, boolean transactionDisabled, Cipher cipherIn, Cipher cipherOut, boolean useRandomAccessFile, boolean deleteFilesAfterClose, int cacheMaxRecords, boolean lockingDisabled) { super(filename, readonly, transactionDisabled, cipherIn, cipherOut, useRandomAccessFile, deleteFilesAfterClose,lockingDisabled); _hash = new LongHashMap(cacheMaxRecords); _max = cacheMaxRecords; } public synchronized A fetch(long recid, Serializer serializer, boolean disableCache) throws IOException { if (disableCache) return super.fetch(recid, serializer, disableCache); else return fetch(recid, serializer); } public synchronized void delete(long recid) throws IOException { checkNotClosed(); super.delete(recid); synchronized (_hash){ CacheEntry entry = _hash.get(recid); if (entry != null) { removeEntry(entry); _hash.remove(entry._recid); } _hashDirties.remove(recid); } if(super.needsAutoCommit()) commit(); } public synchronized void update(final long recid, final A obj, final Serializer serializer) throws IOException { checkNotClosed(); synchronized (_hash){ //remove entry if it already exists CacheEntry entry = cacheGet(recid); if (entry != null) { _hash.remove(recid); removeEntry(entry); } //check if entry is in dirties, in this case just update its object DirtyCacheEntry e = _hashDirties.get(recid); if(e!=null){ if(recid!=e._recid) throw new Error(); e._obj = obj; e._serializer = serializer; return; } //create new dirty entry e = new DirtyCacheEntry(); e._recid = recid; e._obj = obj; e._serializer = serializer; _hashDirties.put(recid,e); } if(super.needsAutoCommit()) commit(); } public synchronized A fetch(long recid, Serializer serializer) throws IOException { checkNotClosed(); final CacheEntry entry = cacheGet(recid); if (entry != null) { return (A) entry._obj; } //check dirties final DirtyCacheEntry entry2 = _hashDirties.get(recid); if(entry2!=null){ return (A) entry2._obj; } A value = super.fetch(recid, serializer); if(super.needsAutoCommit()) commit(); //put record into MRU cache cachePut(recid, value); return value; } public synchronized void close() { if(isClosed()) return; updateCacheEntries(); super.close(); _hash = null; } public synchronized void rollback() { // discard all cache entries since we don't know which entries // where part of the transaction synchronized (_hash){ _hash.clear(); _first = null; _last = null; } super.rollback(); } /** * Obtain an object in the cache */ protected CacheEntry cacheGet(long key) { synchronized (_hash){ CacheEntry entry = _hash.get(key); if ( entry != null && _last != entry) { //touch entry removeEntry(entry); addEntry(entry); } return entry; } } /** * Place an object in the cache. * * @throws IOException */ protected void cachePut(final long recid, final Object value) throws IOException { synchronized (_hash){ CacheEntry entry = _hash.get(recid); if (entry != null) { entry._obj = value; //touch entry if (_last != entry) { removeEntry(entry); addEntry(entry); } } else { if (_hash.size() >= _max) { // purge and recycle entry entry = purgeEntry(); entry._recid = recid; entry._obj = value; } else { entry = new CacheEntry(recid, value); } addEntry(entry); _hash.put(entry._recid, entry); } } } /** * Add a CacheEntry. Entry goes at the end of the list. 
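*
* cachePut/purgeEntry above implement a classic MRU/LRU policy: entries sit in a doubly
* linked list ordered by access, and the least recently used entry is recycled once _max
* is reached. A simplified, boxed sketch of the same eviction policy (JDBM itself uses
* LongHashMap plus the hand-rolled linked list below to avoid boxing recids):
*
*   Map lru = new LinkedHashMap(16, 0.75f, true) {   // true = access order
*       protected boolean removeEldestEntry(Map.Entry eldest) {
*           return size() > 2048;                    // 2048 is the default MRU cache size
*       }
*   };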
*/ protected void addEntry(CacheEntry entry) { synchronized (_hash){ if (_first == null) { _first = entry; _last = entry; } else { _last._next = entry; entry._previous = _last; _last = entry; } } } /** * Remove a CacheEntry from linked list */ protected void removeEntry(CacheEntry entry) { synchronized (_hash){ if (entry == _first) { _first = entry._next; } if (_last == entry) { _last = entry._previous; } CacheEntry previous = entry._previous; CacheEntry next = entry._next; if (previous != null) { previous._next = next; } if (next != null) { next._previous = previous; } entry._previous = null; entry._next = null; } } /** * Purge least recently used object from the cache * * @return recyclable CacheEntry */ protected CacheEntry purgeEntry() { synchronized (_hash){ CacheEntry entry = _first; if (entry == null) return new CacheEntry(-1, null); removeEntry(entry); _hash.remove(entry._recid); entry._obj = null; return entry; } } @SuppressWarnings("unchecked") static final class CacheEntry { protected long _recid; protected Object _obj; protected CacheEntry _previous; protected CacheEntry _next; CacheEntry(long recid, Object obj) { _recid = recid; _obj = obj; } } public void clearCache() { if(debug) System.err.println("DBCache: Clear cache"); // discard all cache entries since we don't know which entries // where part of the transaction synchronized (_hash){ _hash.clear(); _first = null; _last = null; //clear dirties updateCacheEntries(); } } } ================================================ FILE: src/main/java/org/apache/jdbm/DBCacheRef.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import javax.crypto.Cipher; import java.io.IOException; import java.lang.ref.ReferenceQueue; import java.lang.ref.SoftReference; import java.lang.ref.WeakReference; import java.util.Iterator; import java.util.concurrent.atomic.AtomicInteger; /** * A DB wrapping and caching another DB. * * @author Jan Kotek * @author Alex Boisvert * @author Cees de Groot * * TODO add 'cache miss' statistics */ public class DBCacheRef extends DBCache { private static final boolean debug = false; /** * If Soft Cache is enabled, this contains softly referenced clean entries. * If entry became dirty, it is moved to _hash with limited size. 
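*
* Clean entries are wrapped in soft/weak references registered with _refQueue; the disposer
* thread drains that queue and evicts collected entries. A condensed sketch of that loop
* (the full version lives in SoftRunnable below; "queue" stands for _refQueue):
*
*   SoftCacheEntry e;
*   while ((e = (SoftCacheEntry) queue.poll()) != null) {
*       _softHash.remove(e.getRecid());              // drop entry whose value was GCed
*   }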
* This map is accessed from SoftCache Disposer thread, so all access must be * synchronized */ protected LongHashMap _softHash; /** * Reference queue used to collect Soft Cache entries */ protected ReferenceQueue _refQueue; /** * Thread in which Soft Cache references are disposed */ protected Thread _softRefThread; protected static AtomicInteger threadCounter = new AtomicInteger(0); /** counter which counts number of insert since last 'action'*/ protected int insertCounter = 0; private final boolean _autoClearReferenceCacheOnLowMem; private final byte _cacheType; /** * Construct a CacheRecordManager wrapping another DB and * using a given cache policy. */ public DBCacheRef(String filename, boolean readonly, boolean transactionDisabled, Cipher cipherIn, Cipher cipherOut, boolean useRandomAccessFile, boolean deleteFilesAfterClose, byte cacheType, boolean cacheAutoClearOnLowMem, boolean lockingDisabled) { super(filename, readonly, transactionDisabled, cipherIn, cipherOut, useRandomAccessFile, deleteFilesAfterClose, lockingDisabled); this._cacheType = cacheType; _autoClearReferenceCacheOnLowMem = cacheAutoClearOnLowMem; _softHash = new LongHashMap(); _refQueue = new ReferenceQueue(); _softRefThread = new Thread( new SoftRunnable(this, _refQueue), "JDBM Soft Cache Disposer " + (threadCounter.incrementAndGet())); _softRefThread.setDaemon(true); _softRefThread.start(); } void clearCacheIfLowOnMem() { insertCounter = 0; if(!_autoClearReferenceCacheOnLowMem) return; Runtime r = Runtime.getRuntime(); long max = r.maxMemory(); if(max == Long.MAX_VALUE) return; double free = r.freeMemory(); double total = r.totalMemory(); //We believe that free refers to total not max. //Increasing heap size to max would increase to max free = free + (max-total); if(debug) System.err.println("DBCache: freemem = " +free + " = "+(free/max)+"%"); if(free<1e7 || free*4 A fetch(long recid, Serializer serializer, boolean disableCache) throws IOException { if (disableCache) return super.fetch(recid, serializer, disableCache); else return fetch(recid, serializer); } public synchronized void delete(long recid) throws IOException { checkNotClosed(); super.delete(recid); synchronized (_hashDirties){ _hashDirties.remove(recid); } synchronized (_softHash) { Object e = _softHash.remove(recid); if (e != null && e instanceof ReferenceCacheEntry) { ((ReferenceCacheEntry)e).clear(); } } if(needsAutoCommit()) commit(); } public synchronized void update(final long recid, A obj, Serializer serializer) throws IOException { checkNotClosed(); synchronized (_softHash) { //soft cache can not contain dirty objects Object e = _softHash.remove(recid); if (e != null && e instanceof ReferenceCacheEntry) { ((ReferenceCacheEntry)e).clear(); } } synchronized (_hashDirties){ //put into dirty cache final DirtyCacheEntry e = new DirtyCacheEntry(); e._recid = recid; e._obj = obj; e._serializer = serializer; _hashDirties.put(recid,e); } if(needsAutoCommit()) commit(); } public synchronized A fetch(long recid, Serializer serializer) throws IOException { checkNotClosed(); synchronized (_softHash) { Object e = _softHash.get(recid); if (e != null) { if(e instanceof ReferenceCacheEntry) e = ((ReferenceCacheEntry)e).get(); if (e != null) { return (A) e; } } } synchronized (_hashDirties){ DirtyCacheEntry e2 = _hashDirties.get(recid); if(e2!=null){ return (A) e2._obj; } } A value = super.fetch(recid, serializer); if(needsAutoCommit()) commit(); synchronized (_softHash) { if (_cacheType == SOFT) _softHash.put(recid, new SoftCacheEntry(recid, value, _refQueue)); 
else if (_cacheType == WEAK) _softHash.put(recid, new WeakCacheEntry(recid, value, _refQueue)); else _softHash.put(recid,value); } return value; } public synchronized void close() { checkNotClosed(); updateCacheEntries(); super.close(); _softHash = null; _softRefThread.interrupt(); } public synchronized void rollback() { checkNotClosed(); // discard all cache entries since we don't know which entries // where part of the transaction synchronized (_softHash) { Iterator iter = _softHash.valuesIterator(); while (iter.hasNext()) { ReferenceCacheEntry e = iter.next(); e.clear(); } _softHash.clear(); } super.rollback(); } protected boolean isCacheEntryDirty(DirtyCacheEntry entry) { return _hashDirties.get(entry._recid) != null; } protected void setCacheEntryDirty(DirtyCacheEntry entry, boolean dirty) { if (dirty) { _hashDirties.put(entry._recid, entry); } else { _hashDirties.remove(entry._recid); } } interface ReferenceCacheEntry { long getRecid(); void clear(); Object get(); } @SuppressWarnings("unchecked") static final class SoftCacheEntry extends SoftReference implements ReferenceCacheEntry { protected final long _recid; public long getRecid() { return _recid; } SoftCacheEntry(long recid, Object obj, ReferenceQueue queue) { super(obj, queue); _recid = recid; } } @SuppressWarnings("unchecked") static final class WeakCacheEntry extends WeakReference implements ReferenceCacheEntry { protected final long _recid; public long getRecid() { return _recid; } WeakCacheEntry(long recid, Object obj, ReferenceQueue queue) { super(obj, queue); _recid = recid; } } /** * Runs in separate thread and cleans SoftCache. * Runnable auto exists when CacheRecordManager is GCed * * @author Jan Kotek */ static final class SoftRunnable implements Runnable { private ReferenceQueue entryQueue; private WeakReference db2; public SoftRunnable(DBCacheRef db, ReferenceQueue entryQueue) { this.db2 = new WeakReference(db); this.entryQueue = entryQueue; } public void run() { while (true) try { //collect next item from cache, //limit 10000 ms is to keep periodically checking if db was GCed ReferenceCacheEntry e = (ReferenceCacheEntry) entryQueue.remove(10000); //check if db was GCed, cancel in that case DBCacheRef db = db2.get(); if (db == null) return; if (e != null) { synchronized (db._softHash) { int counter = 0; while (e != null) { db._softHash.remove(e.getRecid()); e = (SoftCacheEntry) entryQueue.poll(); if(debug) counter++; } if(debug) System.err.println("DBCache: "+counter+" objects released from ref cache."); } }else{ //check memory consumption every 10 seconds db.clearCacheIfLowOnMem(); } } catch (InterruptedException e) { return; } catch (Throwable e) { //this thread must keep spinning, //otherwise SoftCacheEntries would not be disposed e.printStackTrace(); } } } public void clearCache() { if(debug) System.err.println("DBCache: Clear cache"); synchronized (_softHash) { if(_cacheType!=HARD){ Iterator iter = _softHash.valuesIterator(); while (iter.hasNext()) { ReferenceCacheEntry e = iter.next(); e.clear(); } } _softHash.clear(); } } } ================================================ FILE: src/main/java/org/apache/jdbm/DBMaker.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import javax.crypto.Cipher; import javax.crypto.SecretKey; import javax.crypto.SecretKeyFactory; import javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.PBEKeySpec; import javax.crypto.spec.SecretKeySpec; import java.io.IOError; import java.security.spec.KeySpec; /** * Class used to configure and create DB. It uses builder pattern. */ public class DBMaker { private byte cacheType = DBCacheRef.MRU; private int mruCacheSize = 2048; private String location = null; private boolean disableTransactions = false; private boolean lockingDisabled = false; private boolean readonly = false; private String password = null; private boolean useAES256Bit = true; private boolean useRandomAccessFile = false; private boolean autoClearRefCacheOnLowMem = true; private boolean closeOnJVMExit = false; private boolean deleteFilesAfterCloseFlag = false; private DBMaker(){} /** * Creates new DBMaker and sets file to load data from. * @param file to load data from * @return new DBMaker */ public static DBMaker openFile(String file){ DBMaker m = new DBMaker(); m.location = file; return m; } /** * Creates new DBMaker which uses in memory store. Data will be lost after JVM exits. * @return new DBMaker */ public static DBMaker openMemory(){ return new DBMaker(); } /** * Open store in zip file * * @param zip file * @return new DBMaker */ public static DBMaker openZip(String zip) { DBMaker m = new DBMaker(); m.location = "$$ZIP$$://"+zip; return m; } static String isZipFileLocation(String location){ String match = "$$ZIP$$://"; if( location.startsWith(match)){ return location.substring(match.length()); } return null; } /** * Use WeakReference for cache. * This cache does not improve performance much, * but prevents JDBM from creating multiple instances of the same object. * * @return this builder */ public DBMaker enableWeakCache() { cacheType = DBCacheRef.WEAK; return this; } /** * Use SoftReference for cache. * This cache greatly improves performance if you have enoguth memory. * Instances in cache are Garbage Collected when memory gets low * * @return this builder */ public DBMaker enableSoftCache() { cacheType = DBCacheRef.SOFT; return this; } /** * Use hard reference for cache. * This greatly improves performance if there is enought memory * Hard cache has smaller memory overhead then Soft or Weak, because * reference objects and queue does not have to be maintained * * @return this builder */ public DBMaker enableHardCache() { cacheType = DBCacheRef.HARD; return this; } /** * Use 'Most Recently Used' cache with limited size. * Oldest instances are released from cache when new instances are fetched. * This cache is not cleared by GC. Is good for systems with limited memory. *
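*
* For illustration, a typical builder chain selecting a cache policy (the file name is
* hypothetical):
*
*   DB db = DBMaker.openFile("data/test")
*                  .enableSoftCache()                // or enableMRUCache() / enableHardCache()
*                  .make();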

* Default size for MRU cache is 2048 records.
*
* @return this builder
*/
public DBMaker enableMRUCache() {
    cacheType = DBCacheRef.MRU;
    return this;
}

/**
* Sets 'Most Recently Used' cache size. This cache is activated by default with size 2048.
*
* @param cacheSize number of instances which will be kept in cache.
* @return this builder
*/
public DBMaker setMRUCacheSize(int cacheSize) {
    if (cacheSize < 0) throw new IllegalArgumentException("Cache size is smaller than zero");
    cacheType = DBCacheRef.MRU;
    mruCacheSize = cacheSize;
    return this;
}

/**
* If reference (soft, weak or hard) cache is enabled,
* GC may not release references fast enough (or not at all in the case of hard cache).
* So JDBM periodically checks the amount of free heap memory.
* If free memory is less than 25% or 10MB,
* JDBM completely clears its reference cache to prevent possible memory issues.
*

* Calling this method disables automatic cache clearing when memory is low.
* Be aware that doing so can lead to out of memory errors.
*
* @return this builder
*/
public DBMaker disableCacheAutoClear(){
    this.autoClearRefCacheOnLowMem = false;
    return this;
}

/**
* Enables storage encryption using an AES cipher. JDBM supports both 128-bit and 256-bit
* encryption if the JRE provides it.
* There are some restrictions on 256-bit AES and not all JREs have it by default.
*
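*
* For illustration, opening an encrypted store (the file name and password are hypothetical;
* pass true only if the JRE's crypto policy permits 256-bit AES):
*
*   DB db = DBMaker.openFile("secret.db")
*                  .enableEncryption("myPassword", false)   // false = 128-bit AES
*                  .make();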

* Storage cannot be read (decrypted) unless the same key is provided the next time it is opened.
*
* @param password used to encrypt store
* @param useAES256Bit if true, strong 256-bit AES encryption is used. Otherwise the more common 128-bit AES is used.
* @return this builder
*/
public DBMaker enableEncryption(String password, boolean useAES256Bit) {
    this.password = password;
    this.useAES256Bit = useAES256Bit;
    return this;
}

/**
* Make DB readonly.
* Update/delete/insert operations will throw 'UnsupportedOperationException'.
*
* @return this builder
*/
public DBMaker readonly() {
    readonly = true;
    return this;
}

/**
* Disable cache completely.
*
* @return this builder
*/
public DBMaker disableCache() {
    cacheType = DBCacheRef.NONE;
    return this;
}

/**
* Option to disable transactions (to increase performance at the cost of potential data loss).
* Transactions are enabled by default.
*

* Switches off transactioning for the record manager. This means * that a) a transaction log is not kept, and b) writes aren't * synch'ed after every update. Writes are cached in memory and then flushed * to disk every N writes. You may also flush writes manually by calling commit(). * This is useful when batch inserting into a new database. *
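*
* For illustration, a bulk-load sketch with transactions disabled and a manual commit every
* 10000 inserts (file and map names are hypothetical; this assumes the single-argument
* createTreeMap(String) factory on the public DB interface):
*
*   DB db = DBMaker.openFile("bulk.db").disableTransactions().make();
*   Map m = db.createTreeMap("data");
*   for (int i = 0; i < 1000000; i++) {
*       m.put(i, "value" + i);
*       if (i % 10000 == 0) db.commit();             // flush cached writes to disk
*   }
*   db.commit();
*   db.close();                                      // must close cleanly, see the warning below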

* When using this, database must be properly closed before JVM shutdown. * Failing to do so may and WILL corrupt store. * * @return this builder */ public DBMaker disableTransactions() { this.disableTransactions = true; return this; } /** * Disable file system based locking (for file systems that do not support it). * * Locking is not supported by many remote or distributed file systems; such * as Lustre and NFS. Attempts to perform locks will result in an * IOException with the message "Function not implemented". * * Disabling locking will avoid this issue, though of course it comes with * all the issues of uncontrolled file access. * * @return this builder */ public DBMaker disableLocking(){ this.lockingDisabled = true; return this; } /** * By default JDBM uses mapped memory buffers to read from files. * But this may behave strangely on some platforms. * Safe alternative is to use old RandomAccessFile rather then mapped ByteBuffer. * There is typically slower (pages needs to be copyed into memory on every write). * * @return this builder */ public DBMaker useRandomAccessFile(){ this.useRandomAccessFile = true; return this; } /** * Registers shutdown hook and close database on JVM exit, if it was not already closed; * * @return this builder */ public DBMaker closeOnExit(){ this.closeOnJVMExit = true; return this; } /** * Delete all storage files after DB is closed * * @return this builder */ public DBMaker deleteFilesAfterClose(){ this.deleteFilesAfterCloseFlag = true; return this; } /** * Opens database with settings earlier specified in this builder. * * @return new DB * @throws java.io.IOError if db could not be opened */ public DB make() { Cipher cipherIn = null; Cipher cipherOut = null; if (password != null) try { //initialize ciphers //this code comes from stack owerflow //http://stackoverflow.com/questions/992019/java-256bit-aes-encryption/992413#992413 byte[] salt = new byte[]{3, -34, 123, 53, 78, 121, -12, -1, 45, -12, -48, 89, 11, 100, 99, 8}; SecretKeyFactory factory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA1"); KeySpec spec = new PBEKeySpec(password.toCharArray(), salt, 1024, useAES256Bit?256:128); SecretKey tmp = factory.generateSecret(spec); SecretKey secret = new SecretKeySpec(tmp.getEncoded(), "AES"); String transform = "AES/CBC/NoPadding"; IvParameterSpec params = new IvParameterSpec(salt); cipherIn = Cipher.getInstance(transform); cipherIn.init(Cipher.ENCRYPT_MODE, secret, params); cipherOut = Cipher.getInstance(transform); cipherOut.init(Cipher.DECRYPT_MODE, secret, params); //sanity check, try with page size byte[] data = new byte[Storage.PAGE_SIZE]; byte[] encData = cipherIn.doFinal(data); if (encData.length != Storage.PAGE_SIZE) throw new Error("Page size changed after encryption, make sure you use '/NoPadding'"); byte[] data2 = cipherOut.doFinal(encData); for (int i = 0; i < data.length; i++) { if (data[i] != data2[i]) throw new Error("Encryption provided by JRE does not work"); } } catch (Exception e) { throw new IOError(e); } DBAbstract db = null; if (cacheType == DBCacheRef.MRU){ db = new DBCacheMRU(location, readonly, disableTransactions, cipherIn, cipherOut,useRandomAccessFile,deleteFilesAfterCloseFlag, mruCacheSize,lockingDisabled); }else if( cacheType == DBCacheRef.SOFT || cacheType == DBCacheRef.HARD || cacheType == DBCacheRef.WEAK) { db = new DBCacheRef(location, readonly, disableTransactions, cipherIn, cipherOut,useRandomAccessFile,deleteFilesAfterCloseFlag, cacheType,autoClearRefCacheOnLowMem,lockingDisabled); } else if (cacheType == 
DBCacheRef.NONE) { db = new DBStore(location, readonly, disableTransactions, cipherIn, cipherOut,useRandomAccessFile,deleteFilesAfterCloseFlag,lockingDisabled); } else { throw new IllegalArgumentException("Unknown cache type: " + cacheType); } if(closeOnJVMExit){ db.addShutdownHook(); } return db; } } ================================================ FILE: src/main/java/org/apache/jdbm/DBStore.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import javax.crypto.Cipher; import java.io.*; import java.util.*; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; /** * This class manages records, which are uninterpreted blobs of data. The * set of operations is simple and straightforward: you communicate with * the class using long "rowids" and byte[] data blocks. Rowids are returned * on inserts and you can stash them away someplace safe to be able to get * back to them. Data blocks can be as long as you wish, and may have * lengths different from the original when updating. *

* Operations are synchronized, so that only one of them will happen
* concurrently even if you hammer away from multiple threads. Operations
* are made atomic by keeping a transaction log which is recovered after
* a crash, so the operations specified by this interface all have ACID
* properties.
*
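*
* For illustration, how the transaction log surfaces to user code through commit() and
* rollback() (file and map names are hypothetical):
*
*   DB db = DBMaker.openFile("tx.db").make();
*   Map m = db.getTreeMap("accounts");               // assumes the map already exists
*   m.put("alice", 100);
*   db.commit();                                     // durable from this point
*   m.put("alice", 0);
*   db.rollback();                                   // uncommitted change is discarded
*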

* You identify a file by just the name. The package attaches .db * for the database file, and .lg for the transaction log. The * transaction log is synchronized regularly and then restarted, so don't * worry if you see the size going up and down. * * @author Alex Boisvert * @author Cees de Groot */ class DBStore extends DBAbstract { /** * Version of storage. It should be safe to open lower versions, but engine should throw exception * while opening new versions (as it contains unsupported features or serialization) */ static final long STORE_FORMAT_VERSION = 1L; /** * Underlying file for store records. */ private PageFile _file; /** * Page manager for physical manager. */ private PageManager _pageman; /** * Physical row identifier manager. */ private PhysicalRowIdManager _physMgr; /** * Indicated that store is opened for readonly operations * If true, store will throw UnsupportedOperationException when update/insert/delete operation is called */ private final boolean readonly; final boolean transactionsDisabled; private final boolean deleteFilesAfterClose; private static final int AUTOCOMMIT_AFTER_N_PAGES = 1024 * 5; boolean commitInProgress = false; /** * cipher used for decryption, may be null */ private Cipher cipherOut; /** * cipher used for encryption, may be null */ private Cipher cipherIn; private boolean useRandomAccessFile; private boolean lockingDisabled; void checkCanWrite() { if (readonly) throw new UnsupportedOperationException("Could not write, store is opened as read-only"); } /** * Logigal to Physical row identifier manager. */ private LogicalRowIdManager _logicMgr; /** * Static debugging flag */ public static final boolean DEBUG = false; static final long PREALOCATE_PHYS_RECID = Short.MIN_VALUE; static final Object PREALOCATE_OBJ = new Object(); private final DataInputOutput buffer = new DataInputOutput(); private boolean bufferInUse = false; private final String _filename; public DBStore(String filename, boolean readonly, boolean transactionDisabled, boolean lockingDisabled) throws IOException { this(filename, readonly, transactionDisabled, null, null, false,false,false); } /** * Creates a record manager for the indicated file * * @throws IOException when the file cannot be opened or is not * a valid file content-wise. */ public DBStore(String filename, boolean readonly, boolean transactionDisabled, Cipher cipherIn, Cipher cipherOut, boolean useRandomAccessFile, boolean deleteFilesAfterClose, boolean lockingDisabled){ _filename = filename; this.readonly = readonly; this.transactionsDisabled = transactionDisabled; this.cipherIn = cipherIn; this.cipherOut = cipherOut; this.useRandomAccessFile = useRandomAccessFile; this.deleteFilesAfterClose = deleteFilesAfterClose; this.lockingDisabled = lockingDisabled; reopen(); } private void reopen() { try{ _file = new PageFile(_filename, readonly, transactionsDisabled, cipherIn, cipherOut,useRandomAccessFile,lockingDisabled); _pageman = new PageManager(_file); _physMgr = new PhysicalRowIdManager(_file, _pageman); _logicMgr = new LogicalRowIdManager(_file, _pageman); long versionNumber = getRoot(STORE_VERSION_NUMBER_ROOT); if (versionNumber > STORE_FORMAT_VERSION) throw new IOException("Unsupported version of store. Please update JDBM. Minimal supported ver:" + STORE_FORMAT_VERSION + ", store ver:" + versionNumber); if (!readonly) setRoot(STORE_VERSION_NUMBER_ROOT, STORE_FORMAT_VERSION); }catch(IOException e){ throw new IOError(e); } } /** * Closes the record manager. 
* * @throws IOException when one of the underlying I/O operations fails. */ public synchronized void close() { checkNotClosed(); try { super.close(); _pageman.close(); _file.close(); if(deleteFilesAfterClose) _file.storage.deleteAllFiles(); _pageman = null; _file = null; } catch (IOException e) { throw new IOError(e); } } public boolean isClosed() { return _pageman==null; } public synchronized long insert(final A obj, final Serializer serializer, final boolean disableCache) throws IOException { checkNotClosed(); checkCanWrite(); if (needsAutoCommit()) { commit(); } if (bufferInUse) { //current reusable buffer is in use, have to fallback into creating new instances DataInputOutput buffer2 = new DataInputOutput(); return insert2(obj, serializer, buffer2); } try { bufferInUse = true; return insert2(obj, serializer, buffer); } finally { bufferInUse = false; } } boolean needsAutoCommit() { return transactionsDisabled && !commitInProgress && (_file.getDirtyPageCount() >= AUTOCOMMIT_AFTER_N_PAGES ); } private long insert2(A obj, Serializer serializer, DataInputOutput buf) throws IOException { buf.reset(); long physRowId; if(obj==PREALOCATE_OBJ){ //if inserted record is PREALOCATE_OBJ , it gets special handling. //it is inserted only into _logicMgr with special value to indicate null //this is used to preallocate recid for lazy inserts in cache physRowId = PREALOCATE_PHYS_RECID; }else{ serializer.serialize(buf, obj); if(buf.getPos()>RecordHeader.MAX_RECORD_SIZE){ throw new IllegalArgumentException("Too big record. JDBM only supports record size up to: "+RecordHeader.MAX_RECORD_SIZE+" bytes. Record size was: "+buf.getPos()); } physRowId = _physMgr.insert(buf.getBuf(), 0, buf.getPos()); } final long recid = _logicMgr.insert(physRowId); if (DEBUG) { System.out.println("BaseRecordManager.insert() recid " + recid + " length " + buf.getPos()); } return compressRecid(recid); } public synchronized void delete(long logRowId) throws IOException { checkNotClosed(); checkCanWrite(); if (logRowId <= 0) { throw new IllegalArgumentException("Argument 'recid' is invalid: " + logRowId); } if (needsAutoCommit()) { commit(); } if (DEBUG) { System.out.println("BaseRecordManager.delete() recid " + logRowId); } logRowId = decompressRecid(logRowId); long physRowId = _logicMgr.fetch(logRowId); _logicMgr.delete(logRowId); if(physRowId!=PREALOCATE_PHYS_RECID){ _physMgr.free(physRowId); } } public synchronized void update(long recid, A obj, Serializer serializer) throws IOException { checkNotClosed(); checkCanWrite(); if (recid <= 0) { throw new IllegalArgumentException("Argument 'recid' is invalid: " + recid); } if (needsAutoCommit()) { commit(); } if (bufferInUse) { //current reusable buffer is in use, have to create new instances DataInputOutput buffer2 = new DataInputOutput(); update2(recid, obj, serializer, buffer2); return; } try { bufferInUse = true; update2(recid, obj, serializer, buffer); } finally { bufferInUse = false; } } private void update2(long logRecid, final A obj, final Serializer serializer, final DataInputOutput buf) throws IOException { logRecid = decompressRecid(logRecid); long physRecid = _logicMgr.fetch(logRecid); if (physRecid == 0) throw new IOException("Can not update, recid does not exist: " + logRecid); buf.reset(); serializer.serialize(buf, obj); if (DEBUG) { System.out.println("BaseRecordManager.update() recid " + logRecid + " length " + buf.getPos()); } long newRecid = physRecid!=PREALOCATE_PHYS_RECID? 
_physMgr.update(physRecid, buf.getBuf(), 0, buf.getPos()): //previous record was only virtual and does not actually exist, so make new insert _physMgr.insert(buf.getBuf(),0,buf.getPos()); _logicMgr.update(logRecid, newRecid); } public synchronized A fetch(final long recid, final Serializer serializer) throws IOException { checkNotClosed(); if (recid <= 0) { throw new IllegalArgumentException("Argument 'recid' is invalid: " + recid); } if (bufferInUse) { //current reusable buffer is in use, have to create new instances DataInputOutput buffer2 = new DataInputOutput(); return fetch2(recid, serializer, buffer2); } try { bufferInUse = true; return fetch2(recid, serializer, buffer); } finally { bufferInUse = false; } } public synchronized A fetch(long recid, Serializer serializer, boolean disableCache) throws IOException { //we dont have any cache, so can ignore disableCache parameter return fetch(recid, serializer); } private A fetch2(long recid, final Serializer serializer, final DataInputOutput buf) throws IOException { recid = decompressRecid(recid); buf.reset(); long physLocation = _logicMgr.fetch(recid); if (physLocation == 0) { //throw new IOException("Record not found, recid: "+recid); return null; } if(physLocation == PREALOCATE_PHYS_RECID){ throw new InternalError("cache should prevent this!"); } _physMgr.fetch(buf, physLocation); if (DEBUG) { System.out.println("BaseRecordManager.fetch() recid " + recid + " length " + buf.getPos()); } buf.resetForReading(); try { return serializer.deserialize(buf); //TODO there should be write limit to throw EOFException } catch (ClassNotFoundException e) { throw new IOError(e); } } byte[] fetchRaw(long recid) throws IOException { recid = decompressRecid(recid); long physLocation = _logicMgr.fetch(recid); if (physLocation == 0) { //throw new IOException("Record not found, recid: "+recid); return null; } DataInputOutput i = new DataInputOutput(); _physMgr.fetch(i, physLocation); return i.toByteArray(); } public synchronized long getRoot(final byte id){ checkNotClosed(); return _pageman.getFileHeader().fileHeaderGetRoot(id); } public synchronized void setRoot(final byte id, final long rowid){ checkNotClosed(); checkCanWrite(); _pageman.getFileHeader().fileHeaderSetRoot(id, rowid); } public synchronized void commit() { try { commitInProgress = true; checkNotClosed(); checkCanWrite(); /** flush free phys rows into pages*/ _physMgr.commit(); _logicMgr.commit(); /**commit pages */ _pageman.commit(); } catch (IOException e) { throw new IOError(e); }finally { commitInProgress= false; } } public synchronized void rollback() { if (transactionsDisabled) throw new IllegalAccessError("Transactions are disabled, can not rollback"); try { checkNotClosed(); _physMgr.rollback(); _logicMgr.rollback(); _pageman.rollback(); super.rollback(); } catch (IOException e) { throw new IOError(e); } } public void copyToZip(String zipFile) { try { String zip = zipFile; String zip2 = "db"; ZipOutputStream z = new ZipOutputStream(new FileOutputStream(zip)); //copy zero pages { String file = zip2 + 0; z.putNextEntry(new ZipEntry(file)); z.write(Utils.encrypt(cipherIn, _pageman.getHeaderBufData())); z.closeEntry(); } //iterate over pages and create new file for each for (long pageid = _pageman.getFirst(Magic.TRANSLATION_PAGE); pageid != 0; pageid = _pageman.getNext(pageid) ) { PageIo page = _file.get(pageid); String file = zip2 + pageid; z.putNextEntry(new ZipEntry(file)); z.write(Utils.encrypt(cipherIn, page.getData())); z.closeEntry(); _file.release(page); } for (long pageid = 
_pageman.getFirst(Magic.FREELOGIDS_PAGE); pageid != 0; pageid = _pageman.getNext(pageid) ) { PageIo page = _file.get(pageid); String file = zip2 + pageid; z.putNextEntry(new ZipEntry(file)); z.write(Utils.encrypt(cipherIn, page.getData())); z.closeEntry(); _file.release(page); } for (long pageid = _pageman.getFirst(Magic.USED_PAGE); pageid != 0; pageid = _pageman.getNext(pageid) ) { PageIo page = _file.get(pageid); String file = zip2 + pageid; z.putNextEntry(new ZipEntry(file)); z.write(Utils.encrypt(cipherIn, page.getData())); z.closeEntry(); _file.release(page); } for (long pageid = _pageman.getFirst(Magic.FREEPHYSIDS_PAGE); pageid != 0; pageid = _pageman.getNext(pageid) ) { PageIo page = _file.get(pageid); String file = zip2 + pageid; z.putNextEntry(new ZipEntry(file)); z.write(Utils.encrypt(cipherIn, page.getData())); z.closeEntry(); _file.release(page); } for (long pageid = _pageman.getFirst(Magic.FREEPHYSIDS_ROOT_PAGE); pageid != 0; pageid = _pageman.getNext(pageid) ) { PageIo page = _file.get(pageid); String file = zip2 + pageid; z.putNextEntry(new ZipEntry(file)); z.write(Utils.encrypt(cipherIn, page.getData())); z.closeEntry(); _file.release(page); } z.close(); } catch (IOException e) { throw new IOError(e); } } public synchronized void clearCache() { //no cache } private long statisticsCountPages(short pageType) throws IOException { long pageCounter = 0; for (long pageid = _pageman.getFirst(pageType); pageid != 0; pageid = _pageman.getNext(pageid) ) { pageCounter++; } return pageCounter; } public synchronized String calculateStatistics() { checkNotClosed(); try { final StringBuilder b = new StringBuilder(); //count pages { b.append("PAGES:\n"); long total = 0; long pages = statisticsCountPages(Magic.USED_PAGE); total += pages; b.append(" " + pages + " used pages with size " + Utils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n"); pages = statisticsCountPages(Magic.TRANSLATION_PAGE); total += pages; b.append(" " + pages + " record translation pages with size " + Utils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n"); pages = statisticsCountPages(Magic.FREE_PAGE); total += pages; b.append(" " + pages + " free (unused) pages with size " + Utils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n"); pages = statisticsCountPages(Magic.FREEPHYSIDS_PAGE); total += pages; b.append(" " + pages + " free (phys) pages with size " + Utils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n"); pages = statisticsCountPages(Magic.FREELOGIDS_PAGE); total += pages; b.append(" " + pages + " free (logical) pages with size " + Utils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n"); b.append(" Total number of pages is " + total + " with size " + Utils.formatSpaceUsage(total * Storage.PAGE_SIZE) + "\n"); } { b.append("RECORDS:\n"); long recordCount = 0; long freeRecordCount = 0; long maximalRecordSize = 0; long maximalAvailSizeDiff = 0; long totalRecordSize = 0; long totalAvailDiff = 0; //count records for (long pageid = _pageman.getFirst(Magic.TRANSLATION_PAGE); pageid != 0; pageid = _pageman.getNext(pageid) ) { PageIo io = _file.get(pageid); for (int i = 0; i < _logicMgr.ELEMS_PER_PAGE; i += 1) { final int pos = Magic.PAGE_HEADER_SIZE + i * Magic.PhysicalRowId_SIZE; final long physLoc = io.pageHeaderGetLocation((short) pos); if (physLoc == 0) { freeRecordCount++; continue; } if(physLoc == PREALOCATE_PHYS_RECID){ continue; } recordCount++; //get size PageIo page = _file.get(physLoc>>> Storage.PAGE_SIZE_SHIFT); final short physOffset =(short) (physLoc & Storage.OFFSET_MASK); int availSize = 
RecordHeader.getAvailableSize(page, physOffset); int currentSize = RecordHeader.getCurrentSize(page, physOffset); _file.release(page); maximalAvailSizeDiff = Math.max(maximalAvailSizeDiff, availSize - currentSize); maximalRecordSize = Math.max(maximalRecordSize, currentSize); totalAvailDiff += availSize - currentSize; totalRecordSize += currentSize; } _file.release(io); } b.append(" Contains " + recordCount + " records and " + freeRecordCount + " free slots.\n"); b.append(" Total space occupied by data is " + Utils.formatSpaceUsage(totalRecordSize) + "\n"); b.append(" Average data size in record is " + Utils.formatSpaceUsage(Math.round(1D * totalRecordSize / recordCount)) + "\n"); b.append(" Maximal data size in record is " + Utils.formatSpaceUsage(maximalRecordSize) + "\n"); b.append(" Space wasted in record fragmentation is " + Utils.formatSpaceUsage(totalAvailDiff) + "\n"); b.append(" Maximal space wasted in single record fragmentation is " + Utils.formatSpaceUsage(maximalAvailSizeDiff) + "\n"); } return b.toString(); } catch (IOException e) { throw new IOError(e); } } public synchronized void defrag(boolean sortCollections) { try { checkNotClosed(); checkCanWrite(); commit(); final String filename2 = _filename + "_defrag" + System.currentTimeMillis(); final String filename1 = _filename; DBStore db2 = new DBStore(filename2, false, true, cipherIn, cipherOut, false,false,false); //recreate logical file with original page layout { //find minimal logical pageid (logical pageids are negative) LongHashMap logicalPages = new LongHashMap(); long minpageid = 0; for (long pageid = _pageman.getFirst(Magic.TRANSLATION_PAGE); pageid != 0; pageid = _pageman.getNext(pageid) ) { minpageid = Math.min(minpageid, pageid); logicalPages.put(pageid, Utils.EMPTY_STRING); } //fill second db with logical pages long pageCounter = 0; for ( long pageid = db2._pageman.allocate(Magic.TRANSLATION_PAGE); pageid >= minpageid; pageid = db2._pageman.allocate(Magic.TRANSLATION_PAGE) ) { pageCounter++; if (pageCounter % 1000 == 0) db2.commit(); } logicalPages = null; } //reinsert collections so physical records are located near each other //iterate over named object recids, it is sorted with TreeSet if(sortCollections){ long nameRecid = getRoot(NAME_DIRECTORY_ROOT); Collection recids = new TreeSet(); if(nameRecid!=0){ HTree m = fetch(nameRecid); recids.addAll(m.values()); } for (Long namedRecid : recids) { Object obj = fetch(namedRecid); if (obj instanceof LinkedList) { LinkedList2.defrag(namedRecid, this, db2); } else if (obj instanceof HTree) { HTree.defrag(namedRecid, this, db2); } else if (obj instanceof BTree) { BTree.defrag(namedRecid, this, db2); } } } for (long pageid = _pageman.getFirst(Magic.TRANSLATION_PAGE); pageid != 0; pageid = _pageman.getNext(pageid) ) { PageIo io = _file.get(pageid); for (int i = 0; i < _logicMgr.ELEMS_PER_PAGE; i += 1) { final int pos = Magic.PAGE_HEADER_SIZE + i * Magic.PhysicalRowId_SIZE; if (pos > Short.MAX_VALUE) throw new Error(); //write to new file final long logicalRowId = ((-pageid) << Storage.PAGE_SIZE_SHIFT) + (long) pos; //read from logical location in second db, //check if record was already inserted as part of collections if (db2._pageman.getLast(Magic.TRANSLATION_PAGE) <= pageid && db2._logicMgr.fetch(logicalRowId) != 0) { //yes, this record already exists in second db continue; } //get physical location in this db final long physRowId = io.pageHeaderGetLocation((short) pos); if (physRowId == 0) continue; if (physRowId == PREALOCATE_PHYS_RECID){ 
db2._logicMgr.forceInsert(logicalRowId, physRowId); continue; } //read from physical location at this db DataInputOutput b = new DataInputOutput(); _physMgr.fetch(b, physRowId); byte[] bb = b.toByteArray(); //force insert into other file, without decompressing logical id to external form long physLoc = db2._physMgr.insert(bb, 0, bb.length); db2._logicMgr.forceInsert(logicalRowId, physLoc); } _file.release(io); db2.commit(); } for(byte b = 0;b filesToDelete = new ArrayList(); //now rename old files String[] exts = {StorageDiskMapped.IDR, StorageDiskMapped.DBR}; for (String ext : exts) { String f1 = filename1 + ext; String f2 = filename2 + "_OLD" + ext; //first rename transaction log File f1t = new File(f1 + StorageDisk.transaction_log_file_extension); File f2t = new File(f2 + StorageDisk.transaction_log_file_extension); f1t.renameTo(f2t); filesToDelete.add(f2t); //rename data files, iterate until file exist for (int i = 0; ; i++) { File f1d = new File(f1 + "." + i); if (!f1d.exists()) break; File f2d = new File(f2 + "." + i); f1d.renameTo(f2d); filesToDelete.add(f2d); } } //rename new files for (String ext : exts) { String f1 = filename2 + ext; String f2 = filename1 + ext; //first rename transaction log File f1t = new File(f1 + StorageDisk.transaction_log_file_extension); File f2t = new File(f2 + StorageDisk.transaction_log_file_extension); f1t.renameTo(f2t); //rename data files, iterate until file exist for (int i = 0; ; i++) { File f1d = new File(f1 + "." + i); if (!f1d.exists()) break; File f2d = new File(f2 + "." + i); f1d.renameTo(f2d); } } for (File d : filesToDelete) { d.delete(); } reopen(); } catch (IOException e) { throw new IOError(e); } } /** * Insert data at forced logicalRowId, use only for defragmentation !! * * @param logicalRowId * @param data * @throws IOException */ void forceInsert(long logicalRowId, byte[] data) throws IOException { logicalRowId = decompressRecid(logicalRowId); if (needsAutoCommit()) { commit(); } long physLoc = _physMgr.insert(data, 0, data.length); _logicMgr.forceInsert(logicalRowId, physLoc); } /** * Returns number of records stored in database. * Is used for unit tests */ long countRecords() throws IOException { long counter = 0; long page = _pageman.getFirst(Magic.TRANSLATION_PAGE); while (page != 0) { PageIo io = _file.get(page); for (int i = 0; i < _logicMgr.ELEMS_PER_PAGE; i += 1) { int pos = Magic.PAGE_HEADER_SIZE + i * Magic.PhysicalRowId_SIZE; if (pos > Short.MAX_VALUE) throw new Error(); //get physical location long physRowId = io.pageHeaderGetLocation((short) pos); if (physRowId != 0) counter += 1; } _file.release(io); page = _pageman.getNext(page); } return counter; } private static int COMPRESS_RECID_PAGE_SHIFT = Integer.MIN_VALUE; static{ int shift = 1; while((1<>> (64- COMPRESS_RECID_PAGE_SHIFT); /** * Compress recid from physical form (block - offset) to (block - slot). 
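*
* In short, for a physical recid laid out as (page << Storage.PAGE_SIZE_SHIFT) + offset,
* the compressed form computed below is
*
*   slot       = (offset - Magic.PAGE_HEADER_SIZE) / Magic.PhysicalRowId_SIZE;
*   compressed = (page << COMPRESS_RECID_PAGE_SHIFT) + slot;
*
* and decompressRecid() inverts it:
*
*   offset = slot * Magic.PhysicalRowId_SIZE + Magic.PAGE_HEADER_SIZE;
*   recid  = (page << Storage.PAGE_SIZE_SHIFT) + offset;
*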
* This way resulting number is smaller and can be easier packed with LongPacker */ static long compressRecid(final long recid) { final long page = recid>>> Storage.PAGE_SIZE_SHIFT; short offset = (short) (recid & Storage.OFFSET_MASK); offset = (short) (offset - Magic.PAGE_HEADER_SIZE); if (offset % Magic.PhysicalRowId_SIZE != 0) throw new InternalError("recid not dividable "+Magic.PhysicalRowId_SIZE); long slot = offset / Magic.PhysicalRowId_SIZE; return (page << COMPRESS_RECID_PAGE_SHIFT) + slot; } static long decompressRecid(final long recid) { final long page = recid >>> COMPRESS_RECID_PAGE_SHIFT; final short offset = (short) ((recid & COMPRESS_RECID_OFFSET_MASK) * Magic.PhysicalRowId_SIZE + Magic.PAGE_HEADER_SIZE); return (page << Storage.PAGE_SIZE_SHIFT) + (long) offset; } } ================================================ FILE: src/main/java/org/apache/jdbm/DataInputOutput.java ================================================ package org.apache.jdbm; import java.io.*; import java.nio.ByteBuffer; import java.util.Arrays; /** * Utility class which implements DataInput and DataOutput on top of byte[] buffer * with minimal overhead * * @author Jan Kotek */ class DataInputOutput implements DataInput, DataOutput, ObjectInput, ObjectOutput { private int pos = 0; private int count = 0; private byte[] buf; public DataInputOutput() { buf = new byte[8]; } public DataInputOutput(byte[] data) { buf = data; count = data.length; } public byte[] getBuf() { return buf; } public int getPos() { return pos; } public void reset() { pos = 0; count = 0; } public void resetForReading() { count = pos; pos = 0; } public void reset(byte[] b) { pos = 0; buf = b; count = b.length; } public byte[] toByteArray() { byte[] d = new byte[pos]; System.arraycopy(buf, 0, d, 0, pos); return d; } public int available() { return count - pos; } public void readFully(byte[] b) throws IOException { readFully(b, 0, b.length); } public void readFully(byte[] b, int off, int len) throws IOException { System.arraycopy(buf, pos, b, off, len); pos += len; } public int skipBytes(int n) throws IOException { pos += n; return n; } public boolean readBoolean() throws IOException { return buf[pos++] == 1; } public byte readByte() throws IOException { return buf[pos++]; } public int readUnsignedByte() throws IOException { return buf[pos++] & 0xff; } public short readShort() throws IOException { return (short) (((short) (buf[pos++] & 0xff) << 8) | ((short) (buf[pos++] & 0xff) << 0)); } public int readUnsignedShort() throws IOException { return (((int) (buf[pos++] & 0xff) << 8) | ((int) (buf[pos++] & 0xff) << 0)); } public char readChar() throws IOException { return (char) readInt(); } public int readInt() throws IOException { return (((buf[pos++] & 0xff) << 24) | ((buf[pos++] & 0xff) << 16) | ((buf[pos++] & 0xff) << 8) | ((buf[pos++] & 0xff) << 0)); } public long readLong() throws IOException { return (((long) (buf[pos++] & 0xff) << 56) | ((long) (buf[pos++] & 0xff) << 48) | ((long) (buf[pos++] & 0xff) << 40) | ((long) (buf[pos++] & 0xff) << 32) | ((long) (buf[pos++] & 0xff) << 24) | ((long) (buf[pos++] & 0xff) << 16) | ((long) (buf[pos++] & 0xff) << 8) | ((long) (buf[pos++] & 0xff) << 0)); } public float readFloat() throws IOException { return Float.intBitsToFloat(readInt()); } public double readDouble() throws IOException { return Double.longBitsToDouble(readLong()); } public String readLine() throws IOException { return readUTF(); } public String readUTF() throws IOException { return Serialization.deserializeString(this); } /** * make 
sure there will be enought space in buffer to write N bytes */ private void ensureAvail(int n) { if (pos + n >= buf.length) { int newSize = Math.max(pos + n, buf.length * 2); buf = Arrays.copyOf(buf, newSize); } } public void write(int b) throws IOException { ensureAvail(1); buf[pos++] = (byte) b; } public void write(byte[] b) throws IOException { write(b, 0, b.length); } public void write(byte[] b, int off, int len) throws IOException { ensureAvail(len); System.arraycopy(b, off, buf, pos, len); pos += len; } public void writeBoolean(boolean v) throws IOException { ensureAvail(1); buf[pos++] = (byte) (v ? 1 : 0); } public void writeByte(int v) throws IOException { ensureAvail(1); buf[pos++] = (byte) (v); } public void writeShort(int v) throws IOException { ensureAvail(2); buf[pos++] = (byte) (0xff & (v >> 8)); buf[pos++] = (byte) (0xff & (v >> 0)); } public void writeChar(int v) throws IOException { writeInt(v); } public void writeInt(int v) throws IOException { ensureAvail(4); buf[pos++] = (byte) (0xff & (v >> 24)); buf[pos++] = (byte) (0xff & (v >> 16)); buf[pos++] = (byte) (0xff & (v >> 8)); buf[pos++] = (byte) (0xff & (v >> 0)); } public void writeLong(long v) throws IOException { ensureAvail(8); buf[pos++] = (byte) (0xff & (v >> 56)); buf[pos++] = (byte) (0xff & (v >> 48)); buf[pos++] = (byte) (0xff & (v >> 40)); buf[pos++] = (byte) (0xff & (v >> 32)); buf[pos++] = (byte) (0xff & (v >> 24)); buf[pos++] = (byte) (0xff & (v >> 16)); buf[pos++] = (byte) (0xff & (v >> 8)); buf[pos++] = (byte) (0xff & (v >> 0)); } public void writeFloat(float v) throws IOException { ensureAvail(4); writeInt(Float.floatToIntBits(v)); } public void writeDouble(double v) throws IOException { ensureAvail(8); writeLong(Double.doubleToLongBits(v)); } public void writeBytes(String s) throws IOException { writeUTF(s); } public void writeChars(String s) throws IOException { writeUTF(s); } public void writeUTF(String s) throws IOException { Serialization.serializeString(this, s); } /** helper method to write data directly from PageIo*/ public void writeFromByteBuffer(ByteBuffer b, int offset, int length) { ensureAvail(length); b.position(offset); b.get(buf,pos,length); pos+=length; } //temp var used for Externalizable SerialClassInfo serializer; //temp var used for Externalizable Serialization.FastArrayList objectStack; public Object readObject() throws ClassNotFoundException, IOException { //is here just to implement ObjectInput //Fake method which reads data from serializer. 
//We could probably implement separate wrapper for this, but I want to safe class space return serializer.deserialize(this, objectStack); } public int read() throws IOException { //is here just to implement ObjectInput return readUnsignedByte(); } public int read(byte[] b) throws IOException { //is here just to implement ObjectInput readFully(b); return b.length; } public int read(byte[] b, int off, int len) throws IOException { //is here just to implement ObjectInput readFully(b,off,len); return len; } public long skip(long n) throws IOException { //is here just to implement ObjectInput pos += n; return n; } public void close() throws IOException { //is here just to implement ObjectInput //do nothing } public void writeObject(Object obj) throws IOException { //is here just to implement ObjectOutput serializer.serialize(this,obj,objectStack); } public void flush() throws IOException { //is here just to implement ObjectOutput //do nothing } } ================================================ FILE: src/main/java/org/apache/jdbm/DataInputOutput2.java ================================================ ///* //package org.apache.jdbm; // //import java.io.DataInput; //import java.io.DataOutput; //import java.io.IOException; //import java.nio.Buffer; //import java.nio.ByteBuffer; //import java.util.Arrays; // //*/ ///** // * Utility class which implements DataInput and DataOutput on top of ByteBuffer // * with minimal overhead // * This class is not used, is left here in case we would ever need it. // * // * @author Jan Kotek // *//* // //class DataInputOutput2 implements DataInput, DataOutput { // // private ByteBuffer buf; // // // public DataInputOutput2() { // buf = ByteBuffer.allocate(8); // } // // public DataInputOutput2(ByteBuffer data) { // buf = data; // } // // public DataInputOutput2(byte[] data) { // buf = ByteBuffer.wrap(data); // } // // // public int getPos() { // return buf.position(); // } // // // public void reset() { // buf.rewind(); // } // // // public void reset(byte[] b) { // buf = ByteBuffer.wrap(b); // } // // public void resetForReading() { // buf.flip(); // } // // // public byte[] toByteArray() { // byte[] d = new byte[buf.position()]; // buf.position(0); // buf.get(d); //reading N bytes restores to current position // // return d; // } // // public int available() { // return buf.remaining(); // } // // // public void readFully(byte[] b) throws IOException { // readFully(b, 0, b.length); // } // // public void readFully(byte[] b, int off, int len) throws IOException { // buf.get(b,off,len); // } // // public int skipBytes(int n) throws IOException { // buf.position(buf.position()+n); // return n; // } // // public boolean readBoolean() throws IOException { // return buf.get()==1; // } // // public byte readByte() throws IOException { // return buf.get(); // } // // public int readUnsignedByte() throws IOException { // return buf.get() & 0xff; // } // // public short readShort() throws IOException { // return buf.getShort(); // } // // public int readUnsignedShort() throws IOException { // return (((int) (buf.get() & 0xff) << 8) | // ((int) (buf.get() & 0xff) << 0)); // } // // public char readChar() throws IOException { // return (char) readInt(); // } // // public int readInt() throws IOException { // return buf.getInt(); // } // // public long readLong() throws IOException { // return buf.getLong(); // } // // public float readFloat() throws IOException { // return buf.getFloat(); // } // // public double readDouble() throws IOException { // return buf.getDouble(); // 
} // // public String readLine() throws IOException { // return readUTF(); // } // // public String readUTF() throws IOException { // return Serialization.deserializeString(this); // } // // */ ///** // * make sure there will be enough space in buffer to write N bytes // *//* // // private void ensureAvail(int n) { // int pos = buf.position(); // if (pos + n >= buf.limit()) { // int newSize = Math.max(pos + n, buf.limit() * 2); // byte[] b = new byte[newSize]; // buf.get(b); // buf = ByteBuffer.wrap(b); // buf.position(pos); // } // } // // // public void write(final int b) throws IOException { // ensureAvail(1); // buf.put((byte) b); // } // // public void write(final byte[] b) throws IOException { // write(b, 0, b.length); // } // // public void write(final byte[] b, final int off, final int len) throws IOException { // ensureAvail(len); // buf.put(b,off,len); // } // // public void writeBoolean(final boolean v) throws IOException { // ensureAvail(1); // buf.put((byte) (v?1:0)); // } // // public void writeByte(final int v) throws IOException { // ensureAvail(1); // buf.put((byte) v); // } // // public void writeShort(final short v) throws IOException { // ensureAvail(2); // buf.putShort(v); // } // // public void writeChar(final int v) throws IOException { // writeInt(v); // } // // public void writeInt(final int v) throws IOException { // ensureAvail(4); // buf.putInt(v); // } // // public void writeLong(final long v) throws IOException { // ensureAvail(8); // buf.putLong(v); // } // // public void writeFloat(final float v) throws IOException { // ensureAvail(4); // buf.putFloat(v); // } // // public void writeDouble(final double v) throws IOException { // ensureAvail(8); // buf.putDouble(v); // } // // public void writeBytes(String s) throws IOException { // writeUTF(s); // } // // public void writeChars(String s) throws IOException { // writeUTF(s); // } // // public void writeUTF(String s) throws IOException { // Serialization.serializeString(this, s); // } // //} //*/ ================================================ FILE: src/main/java/org/apache/jdbm/HTree.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.*; import java.util.*; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; /** * Persistent HashMap implementation for DB. * Implemented as an H*Tree structure. 
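 * <p>
 * A minimal usage sketch, assuming the DBMaker/DB entry points defined elsewhere in this
 * repository (the exact builder methods are not verified in this file, so treat the calls
 * below only as an illustration of how application code normally reaches an HTree):
 * <pre>
 *   DB db = DBMaker.openFile("mydb").make();                          // assumed builder API
 *   ConcurrentMap<String, Long> scores = db.createHashMap("scores");  // backed by an HTree
 *   scores.put("alice", 1L);
 *   db.commit();
 *   db.close();
 * </pre>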
* * @author Alex Boisvert * @author Jan Kotek */ class HTree extends AbstractMap implements ConcurrentMap { final Serializer SERIALIZER = new Serializer() { public Object deserialize(DataInput ds2) throws IOException { DataInputOutput ds = (DataInputOutput) ds2; try { int i = ds.readUnsignedByte(); if (i == SerializationHeader.HTREE_BUCKET) { //is HashBucket? HTreeBucket ret = new HTreeBucket(HTree.this); if (loadValues) ret.readExternal(ds); if (loadValues && ds.available() != 0) throw new InternalError("bytes left: " + ds.available()); return ret; } else if (i == SerializationHeader.HTREE_DIRECTORY) { HTreeDirectory ret = new HTreeDirectory(HTree.this); ret.readExternal(ds); if (loadValues && ds.available() != 0) throw new InternalError("bytes left: " + ds.available()); return ret; } else { throw new InternalError("Wrong HTree header: " + i); } } catch (ClassNotFoundException e) { throw new IOException(e); } } public void serialize(DataOutput out, Object obj) throws IOException { if (obj instanceof HTreeBucket) { out.write(SerializationHeader.HTREE_BUCKET); HTreeBucket b = (HTreeBucket) obj; b.writeExternal(out); } else { out.write(SerializationHeader.HTREE_DIRECTORY); HTreeDirectory n = (HTreeDirectory) obj; n.writeExternal(out); } } }; final protected ReadWriteLock lock = new ReentrantReadWriteLock(); /** * Listeners which are notified about changes in records */ protected RecordListener[] recordListeners = new RecordListener[0]; /** * Serializer used to serialize index keys (optional) */ protected Serializer keySerializer; /** * Serializer used to serialize index values (optional) */ protected Serializer valueSerializer; protected boolean readonly = false; final long rootRecid; DBAbstract db; /** if false map contains only keys, used for set*/ boolean hasValues = true; /** * counts structural changes in tree at runtume. Is here to support fail-fast behaviour. */ int modCount; /** * indicates if values should be loaded during deserialization, set to true during defragmentation */ private boolean loadValues = true; public Serializer getKeySerializer() { return keySerializer; } public Serializer getValueSerializer() { return valueSerializer; } /** * cache writing buffer, so it does not have to be allocated on each write */ AtomicReference writeBufferCache = new AtomicReference(); /** * Create a persistent hashtable. 
*/ public HTree(DBAbstract db, Serializer keySerializer, Serializer valueSerializer, boolean hasValues) throws IOException { this.keySerializer = keySerializer; this.valueSerializer = valueSerializer; this.db = db; this.hasValues = hasValues; HTreeDirectory root = new HTreeDirectory(this, (byte) 0); root.setPersistenceContext(0); this.rootRecid = db.insert(root, this.SERIALIZER,false); } /** * Load a persistent hashtable */ public HTree(DBAbstract db,long rootRecid, Serializer keySerializer, Serializer valueSerializer, boolean hasValues) throws IOException { this.db = db; this.rootRecid = rootRecid; this.keySerializer = keySerializer; this.valueSerializer = valueSerializer; this.hasValues = hasValues; } void setPersistenceContext(DBAbstract db) { this.db = db; } public V put(K key, V value) { if (readonly) throw new UnsupportedOperationException("readonly"); lock.writeLock().lock(); try { if (key == null || value == null) throw new NullPointerException("Null key or value"); V oldVal = (V) getRoot().put(key, value); if (oldVal == null) { modCount++; //increase size HTreeDirectory root = getRoot(); root.size++; db.update(rootRecid,root,SERIALIZER); for (RecordListener r : recordListeners) r.recordInserted(key, value); } else { //notify listeners for (RecordListener r : recordListeners) r.recordUpdated(key, oldVal, value); } return oldVal; } catch (IOException e) { throw new IOError(e); }finally { lock.writeLock().unlock(); } } public V get(Object key) { if (key == null) return null; lock.readLock().lock(); try { return getRoot().get((K) key); } catch (ClassCastException e) { return null; } catch (IOException e) { throw new IOError(e); }finally { lock.readLock().unlock(); } } public V remove(Object key) { if (readonly) throw new UnsupportedOperationException("readonly"); lock.writeLock().lock(); try { if (key == null) return null; V val = (V) getRoot().remove(key); modCount++; if (val != null){ //decrease size HTreeDirectory root = getRoot(); root.size--; db.update(rootRecid,root,SERIALIZER); for (RecordListener r : recordListeners) r.recordRemoved(key, val); } return val; } catch (ClassCastException e) { return null; } catch (IOException e) { throw new IOError(e); }finally { lock.writeLock().unlock(); } } public boolean containsKey(Object key) { if (key == null) return false; //no need for locking, get is already locked V v = get((K) key); return v != null; } public void clear() { lock.writeLock().lock(); try { Iterator keyIter = keys(); while (keyIter.hasNext()) { keyIter.next(); keyIter.remove(); } } catch (IOException e) { throw new IOError(e); }finally { lock.writeLock().unlock(); } } /** * Returns an enumeration of the keys contained in this */ public Iterator keys() throws IOException { lock.readLock().lock(); try{ return getRoot().keys(); }finally { lock.readLock().unlock(); } } public DBAbstract getRecordManager() { return db; } /** * add RecordListener which is notified about record changes * * @param listener */ public void addRecordListener(RecordListener listener) { recordListeners = Arrays.copyOf(recordListeners, recordListeners.length + 1); recordListeners[recordListeners.length - 1] = listener; } /** * remove RecordListener which is notified about record changes * * @param listener */ public void removeRecordListener(RecordListener listener) { List l = Arrays.asList(recordListeners); l.remove(listener); recordListeners = (RecordListener[]) l.toArray(new RecordListener[1]); } public Set> entrySet() { return _entrySet; } private Set> _entrySet = new AbstractSet>() { protected 
Entry newEntry(K k, V v) { return new SimpleEntry(k, v) { private static final long serialVersionUID = 978651696969194154L; public V setValue(V arg0) { //put is already locked HTree.this.put(getKey(), arg0); return super.setValue(arg0); } }; } public boolean add(java.util.Map.Entry e) { if (readonly) throw new UnsupportedOperationException("readonly"); if (e.getKey() == null) throw new NullPointerException("Can not add null key"); lock.writeLock().lock(); try{ if (e.getValue().equals(get(e.getKey()))) return false; HTree.this.put(e.getKey(), e.getValue()); return true; }finally { lock.writeLock().unlock(); } } @SuppressWarnings("unchecked") public boolean contains(Object o) { if (o instanceof Entry) { Entry e = (java.util.Map.Entry) o; //get is already locked if (e.getKey() != null && HTree.this.get(e.getKey()) != null) return true; } return false; } public Iterator> iterator() { try { final Iterator br = keys(); return new Iterator>() { public boolean hasNext() { return br.hasNext(); } public java.util.Map.Entry next() { K k = br.next(); return newEntry(k, get(k)); } public void remove() { if (readonly) throw new UnsupportedOperationException("readonly"); br.remove(); } }; } catch (IOException e) { throw new IOError(e); } } @SuppressWarnings("unchecked") public boolean remove(Object o) { if (readonly) throw new UnsupportedOperationException("readonly"); if (o instanceof Entry) { Entry e = (java.util.Map.Entry) o; //check for nulls if (e.getKey() == null || e.getValue() == null) return false; lock.writeLock().lock(); try{ //get old value, must be same as item in entry V v = get(e.getKey()); if (v == null || !e.getValue().equals(v)) return false; HTree.this.remove(e.getKey()); return true; }finally{ lock.writeLock().unlock(); } } return false; } @Override public int size() { lock.readLock().lock(); try { int counter = 0; Iterator it = keys(); while (it.hasNext()) { it.next(); counter++; } return counter; } catch (IOException e) { throw new IOError(e); }finally { lock.readLock().unlock(); } } }; HTreeDirectory getRoot() { //assumes that caller already holds read or write lock try { HTreeDirectory root = (HTreeDirectory) db.fetch(rootRecid, this.SERIALIZER); root.setPersistenceContext(rootRecid); return root; } catch (IOException e) { throw new IOError(e); } } public static HTree deserialize(DataInput is, Serialization ser) throws IOException, ClassNotFoundException { long rootRecid = LongPacker.unpackLong(is); boolean hasValues = is.readBoolean(); Serializer keySerializer = (Serializer) ser.deserialize(is); Serializer valueSerializer = (Serializer) ser.deserialize(is); return new HTree(ser.db,rootRecid, keySerializer, valueSerializer, hasValues); } void serialize(DataOutput out) throws IOException { LongPacker.packLong(out, rootRecid); out.writeBoolean(hasValues);; db.defaultSerializer().serialize(out, keySerializer); db.defaultSerializer().serialize(out, valueSerializer); } static void defrag(Long recid, DBStore r1, DBStore r2) throws IOException { //TODO should modCount be increased after defrag, revert or commit? 
try { byte[] data = r1.fetchRaw(recid); r2.forceInsert(recid, data); DataInput in = new DataInputStream(new ByteArrayInputStream(data)); HTree t = (HTree) r1.defaultSerializer().deserialize(in); t.db = r1; t.loadValues = false; HTreeDirectory d = t.getRoot(); if (d != null) { r2.forceInsert(t.rootRecid, r1.fetchRaw(t.rootRecid)); d.defrag(r1, r2); } } catch (ClassNotFoundException e) { throw new IOError(e); } } public int size(){ return (int) getRoot().size; } public boolean hasValues() { return hasValues; } public V putIfAbsent(K key, V value) { lock.writeLock().lock(); try{ if (!containsKey(key)) return put(key, value); else return get(key); }finally { lock.writeLock().unlock(); } } public boolean remove(Object key, Object value) { lock.writeLock().lock(); try{ if (containsKey(key) && get(key).equals(value)) { remove(key); return true; } else return false; }finally { lock.writeLock().unlock(); } } public boolean replace(K key, V oldValue, V newValue) { lock.writeLock().lock(); try{ if (containsKey(key) && get(key).equals(oldValue)) { put(key, newValue); return true; } else return false; }finally { lock.writeLock().unlock(); } } public V replace(K key, V value) { lock.writeLock().lock(); try{ if (containsKey(key)) { return put(key, value); } else return null; }finally { lock.writeLock().unlock(); } } } ================================================ FILE: src/main/java/org/apache/jdbm/HTreeBucket.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.*; import java.util.ArrayList; /** * A bucket is a placeholder for multiple (key, value) pairs. Buckets * are used to store collisions (same hash value) at all levels of an * H*tree. *

* There are two types of buckets: leaf and non-leaf. *

* Non-leaf buckets hold collisions which happen * when the H*tree is not fully expanded. Keys in a non-leaf bucket * can have different hash codes. Non-leaf buckets are limited to an * arbitrary size. When this limit is reached, the H*tree should create * a new HTreeDirectory node and distribute the keys of the overflowing bucket into * the newly created HTreeDirectory. *

* A leaf bucket is a bucket which contains keys which all have * the same hashCode(). Leaf buckets stand at the * bottom of an H*tree because the hashing algorithm cannot further * discriminate between different keys based on their hash code. * * @author Alex Boisvert */ final class HTreeBucket { /** * The maximum number of elements (key, value) a non-leaf bucket * can contain. */ public static final int OVERFLOW_SIZE = 16; /** * Depth of this bucket. */ private byte _depth; /** * Keys and values in this bucket. Keys are followed by values at KEYPOS+OVERFLOW_SIZE */ private Object[] _keysAndValues; private byte size = 0; private final HTree tree; /** * Public constructor for serialization. */ public HTreeBucket(HTree tree) { this.tree = tree; } /** * Construct a bucket with a given depth level. Depth level is the * number of HashDirectory above this bucket. */ public HTreeBucket(HTree tree, byte level) { this.tree = tree; if (level > HTreeDirectory.MAX_DEPTH + 1) { throw new IllegalArgumentException( "Cannot create bucket with depth > MAX_DEPTH+1. " + "Depth=" + level); } _depth = level; _keysAndValues = new Object[OVERFLOW_SIZE * 2]; } /** * Returns the number of elements contained in this bucket. */ public int getElementCount() { return size; } /** * Returns whether or not this bucket is a "leaf bucket". */ public boolean isLeaf() { return (_depth > HTreeDirectory.MAX_DEPTH); } /** * Returns true if bucket can accept at least one more element. */ public boolean hasRoom() { if (isLeaf()) { return true; // leaf buckets are never full } else { // non-leaf bucket return (size < OVERFLOW_SIZE); } } /** * Add an element (key, value) to this bucket. If an existing element * has the same key, it is replaced silently. * * @return Object which was previously associated with the given key * or null if no association existed. */ public V addElement(K key, V value) { //find entry byte existing = -1; for (byte i = 0; i < size; i++) { if (key.equals(_keysAndValues[i])) { existing = i; break; } } if (existing != -1) { // replace existing element Object before = _keysAndValues[existing + OVERFLOW_SIZE]; if (before instanceof BTreeLazyRecord) { BTreeLazyRecord rec = (BTreeLazyRecord) before; before = rec.get(); rec.delete(); } _keysAndValues[existing + OVERFLOW_SIZE] = value; return (V) before; } else { // add new (key, value) pair _keysAndValues[size] = key; _keysAndValues[size + OVERFLOW_SIZE] = value; size++; return null; } } /** * Remove an element, given a specific key. * * @param key Key of the element to remove * @return Removed element value, or null if not found */ public V removeElement(K key) { //find entry byte existing = -1; for (byte i = 0; i < size; i++) { if (key.equals(_keysAndValues[i])) { existing = i; break; } } if (existing != -1) { Object o = _keysAndValues[existing + OVERFLOW_SIZE]; if (o instanceof BTreeLazyRecord) { BTreeLazyRecord rec = (BTreeLazyRecord) o; o = rec.get(); rec.delete(); } //move last element to existing size--; _keysAndValues[existing] = _keysAndValues[size]; _keysAndValues[existing + OVERFLOW_SIZE] = _keysAndValues[size + OVERFLOW_SIZE]; //and unset last element _keysAndValues[size] = null; _keysAndValues[size + OVERFLOW_SIZE] = null; return (V) o; } else { // not found return null; } } /** * Returns the value associated with a given key. If the given key * is not found in this bucket, returns null. 
*/ public V getValue(K key) { //find entry byte existing = -1; for (byte i = 0; i < size; i++) { if (key.equals(_keysAndValues[i])) { existing = i; break; } } if (existing != -1) { Object o = _keysAndValues[existing + OVERFLOW_SIZE]; if (o instanceof BTreeLazyRecord) return ((BTreeLazyRecord) o).get(); else return (V) o; } else { // key not found return null; } } /** * Obtain keys contained in this bucket. Keys are ordered to match * their values, which can be obtained by calling getValues(). *

* Note that the returned list is a freshly allocated ArrayList rather than a view backed by * this bucket, so modifying it has no effect on the bucket contents. */ ArrayList getKeys() { ArrayList ret = new ArrayList(); for (byte i = 0; i < size; i++) { ret.add((K) _keysAndValues[i]); } return ret; } /** * Obtain values contained in this bucket. Values are ordered to match * their keys, which can be obtained by calling getKeys(). *

* As an optimization, the Vector returned is the instance member * of this class. Please don't modify outside the scope of this class. */ ArrayList getValues() { ArrayList ret = new ArrayList(); for (byte i = 0; i < size; i++) { ret.add((V) _keysAndValues[i + OVERFLOW_SIZE]); } return ret; } public void writeExternal(DataOutput out) throws IOException { out.write(_depth); out.write(size); DataInputOutput out3 = tree.writeBufferCache.getAndSet(null); if (out3 == null) out3 = new DataInputOutput(); else out3.reset(); Serializer keySerializer = tree.keySerializer != null ? tree.keySerializer : tree.getRecordManager().defaultSerializer(); for (byte i = 0; i < size; i++) { out3.reset(); keySerializer.serialize(out3, _keysAndValues[i]); LongPacker.packInt(out, out3.getPos()); out.write(out3.getBuf(), 0, out3.getPos()); } //write values if(tree.hasValues()){ Serializer valSerializer = tree.valueSerializer != null ? tree.valueSerializer : tree.getRecordManager().defaultSerializer(); for (byte i = 0; i < size; i++) { Object value = _keysAndValues[i + OVERFLOW_SIZE]; if (value == null) { out.write(BTreeLazyRecord.NULL); } else if (value instanceof BTreeLazyRecord) { out.write(BTreeLazyRecord.LAZY_RECORD); LongPacker.packLong(out, ((BTreeLazyRecord) value).recid); } else { //transform to byte array out3.reset(); valSerializer.serialize(out3, value); if (out3.getPos() > BTreeLazyRecord.MAX_INTREE_RECORD_SIZE) { //store as separate record long recid = tree.getRecordManager().insert(out3.toByteArray(), BTreeLazyRecord.FAKE_SERIALIZER,true); out.write(BTreeLazyRecord.LAZY_RECORD); LongPacker.packLong(out, recid); } else { out.write(out3.getPos()); out.write(out3.getBuf(), 0, out3.getPos()); } } } } tree.writeBufferCache.set(out3); } public void readExternal(DataInputOutput in) throws IOException, ClassNotFoundException { _depth = in.readByte(); size = in.readByte(); //read keys Serializer keySerializer = tree.keySerializer != null ? tree.keySerializer : tree.getRecordManager().defaultSerializer(); _keysAndValues = (K[]) new Object[OVERFLOW_SIZE * 2]; for (byte i = 0; i < size; i++) { int expectedSize = LongPacker.unpackInt(in); K key = (K) BTreeLazyRecord.fastDeser(in, keySerializer, expectedSize); _keysAndValues[i] = key; } //read values if(tree.hasValues()){ Serializer valSerializer = tree.valueSerializer != null ? tree.valueSerializer : (Serializer) tree.getRecordManager().defaultSerializer(); for (byte i = 0; i < size; i++) { int header = in.readUnsignedByte(); if (header == BTreeLazyRecord.NULL) { _keysAndValues[i + OVERFLOW_SIZE] = null; } else if (header == BTreeLazyRecord.LAZY_RECORD) { long recid = LongPacker.unpackLong(in); _keysAndValues[i + OVERFLOW_SIZE] = (new BTreeLazyRecord(tree.getRecordManager(), recid, valSerializer)); } else { _keysAndValues[i + OVERFLOW_SIZE] = BTreeLazyRecord.fastDeser(in, valSerializer, header); } } }else{ for (byte i = 0; i < size; i++) { if(_keysAndValues[i]!=null) _keysAndValues[i+OVERFLOW_SIZE] = Utils.EMPTY_STRING; } } } } ================================================ FILE: src/main/java/org/apache/jdbm/HTreeDirectory.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.*; import java.util.*; /** * Hashtable directory page. * * @author Alex Boisvert */ final class HTreeDirectory { /** * Maximum number of children in a directory. *

* (Must be a power of 2 -- if you update this value, you must also * update BIT_SIZE and MAX_DEPTH.) *

* !!!! do not change this, it affects the storage format; there are also magic numbers which rely on 255 !!! */ static final int MAX_CHILDREN = 256; /** * Number of significant bits per directory level. */ static final int BIT_SIZE = 8; // log2(256) = 8 /** * Maximum number of levels (zero-based) *

* (4 * 8 bits = 32 bits, which is the size of an "int", and as * you know, hashcodes in Java are "ints") */ static final int MAX_DEPTH = 3; // 4 levels /** * Record ids of children nodes. * It is saved in matrix to save memory, some subarrays may be null. */ private long[][] _children; /** * Depth of this directory page, zero-based */ private byte _depth; /** * This directory's record ID in the DB. (transient) */ private long _recid; /** if this is root (depth=0), it contains size, otherwise -1*/ long size; protected final HTree tree; /** * Public constructor used by serialization */ public HTreeDirectory(HTree tree) { this.tree = tree; } /** * Construct a HashDirectory * * @param depth Depth of this directory node. */ HTreeDirectory(HTree tree, byte depth) { this.tree = tree; _depth = depth; _children = new long[32][]; } /** * Sets persistence context. This method must be called before any * persistence-related operation. * * @param recid Record id of this directory. */ void setPersistenceContext(long recid) { this._recid = recid; } /** * Get the record identifier used to load this hashtable. */ long getRecid() { return _recid; } /** * Returns whether or not this directory is empty. A directory * is empty when it no longer contains buckets or sub-directories. */ boolean isEmpty() { for (int i = 0; i < _children.length; i++) { long[] sub = _children[i]; if (sub!=null){ for (int j = 0; j < 8; j++) { if(sub[j] != 0) { return false; } } } } return true; } /** * Returns the value which is associated with the given key. Returns * null if there is not association for this key. * * @param key key whose associated value is to be returned */ V get(K key) throws IOException { int hash = hashCode(key); long child_recid = getRecid(hash); if (child_recid == 0) { // not bucket/node --> not found return null; } else { Object node = tree.db.fetch(child_recid, tree.SERIALIZER); // System.out.println("HashDirectory.get() child is : "+node); if (node instanceof HTreeDirectory) { // recurse into next directory level HTreeDirectory dir = (HTreeDirectory) node; dir.setPersistenceContext(child_recid); return dir.get(key); } else { // node is a bucket HTreeBucket bucket = (HTreeBucket) node; return bucket.getValue(key); } } } private long getRecid(int hash) { long[] sub = _children[hash>>>3]; return sub==null? 0 : sub[hash%8]; } private void putRecid(int hash, long recid) { long[] sub = _children[hash>>>3]; if(sub == null){ sub = new long[8]; _children[hash>>>3] = sub; } sub[hash%8] = recid; } /** * Associates the specified value with the specified key. * * @param key key with which the specified value is to be assocated. * @param value value to be associated with the specified key. * @return object which was previously associated with the given key, * or null if no association existed. 
*/ Object put(final Object key, final Object value) throws IOException { if (value == null) { return remove(key); } int hash = hashCode(key); long child_recid = getRecid(hash); if (child_recid == 0) { // no bucket/node here yet, let's create a bucket HTreeBucket bucket = new HTreeBucket(tree, (byte) (_depth + 1)); // insert (key,value) pair in bucket Object existing = bucket.addElement(key, value); long b_recid = tree.db.insert(bucket, tree.SERIALIZER,false); putRecid(hash, b_recid); tree.db.update(_recid, this, tree.SERIALIZER); // System.out.println("Added: "+bucket); return existing; } else { Object node = tree.db.fetch(child_recid, tree.SERIALIZER); if (node instanceof HTreeDirectory) { // recursive insert in next directory level HTreeDirectory dir = (HTreeDirectory) node; dir.setPersistenceContext(child_recid); return dir.put(key, value); } else { // node is a bucket HTreeBucket bucket = (HTreeBucket) node; if (bucket.hasRoom()) { Object existing = bucket.addElement(key, value); tree.db.update(child_recid, bucket, tree.SERIALIZER); // System.out.println("Added: "+bucket); return existing; } else { // overflow, so create a new directory if (_depth == MAX_DEPTH) { throw new RuntimeException("Cannot create deeper directory. " + "Depth=" + _depth); } HTreeDirectory dir = new HTreeDirectory(tree, (byte) (_depth + 1)); long dir_recid = tree.db.insert(dir, tree.SERIALIZER,false); dir.setPersistenceContext(dir_recid); putRecid(hash, dir_recid); tree.db.update(_recid, this, tree.SERIALIZER); // discard overflown bucket tree.db.delete(child_recid); // migrate existing bucket elements ArrayList keys = bucket.getKeys(); ArrayList values = bucket.getValues(); int entries = keys.size(); for (int i = 0; i < entries; i++) { dir.put(keys.get(i), values.get(i)); } // (finally!) insert new element return dir.put(key, value); } } } } /** * Remove the value which is associated with the given key. If the * key does not exist, this method simply ignores the operation. * * @param key key whose associated value is to be removed * @return object which was associated with the given key, or * null if no association existed with given key. */ Object remove(Object key) throws IOException { int hash = hashCode(key); long child_recid = getRecid(hash); if (child_recid == 0) { // not bucket/node --> not found return null; } else { Object node = tree.db.fetch(child_recid, tree.SERIALIZER); // System.out.println("HashDirectory.remove() child is : "+node); if (node instanceof HTreeDirectory) { // recurse into next directory level HTreeDirectory dir = (HTreeDirectory) node; dir.setPersistenceContext(child_recid); Object existing = dir.remove(key); if (existing != null) { if (dir.isEmpty()) { // delete empty directory tree.db.delete(child_recid); putRecid(hash, 0); tree.db.update(_recid, this, tree.SERIALIZER); } } return existing; } else { // node is a bucket HTreeBucket bucket = (HTreeBucket) node; Object existing = bucket.removeElement(key); if (existing != null) { if (bucket.getElementCount() >= 1) { tree.db.update(child_recid, bucket, tree.SERIALIZER); } else { // delete bucket, it's empty tree.db.delete(child_recid); putRecid(hash, 0); tree.db.update(_recid, this, tree.SERIALIZER); } } return existing; } } } /** * Calculates the hashcode of a key, based on the current directory * depth. 
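 * <p>
 * Worked example, derived from the constants above (MAX_CHILDREN = 256, BIT_SIZE = 8,
 * MAX_DEPTH = 3) and from the mask/shift code below: each directory level consumes one
 * byte of the 32-bit hash, starting at the most significant byte. For a key whose
 * hashCode() is 0xA1B2C3D4 the child indexes chosen per level are:
 * <pre>
 *   depth 0: mask 0xFF000000, shift 24  ->  child 0xA1 (161)
 *   depth 1: mask 0x00FF0000, shift 16  ->  child 0xB2 (178)
 *   depth 2: mask 0x0000FF00, shift  8  ->  child 0xC3 (195)
 *   depth 3: mask 0x000000FF, shift  0  ->  child 0xD4 (212)
 * </pre>
 * The trailing "% MAX_CHILDREN" can never change the result after the mask and shift;
 * it only guards the 0..255 range.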
*/ private int hashCode(Object key) { int hashMask = hashMask(); int hash = key.hashCode(); hash = hash & hashMask; hash = hash >>> ((MAX_DEPTH - _depth) * BIT_SIZE); hash = hash % MAX_CHILDREN; /* System.out.println("HashDirectory.hashCode() is: 0x" +Integer.toHexString(hash) +" for object hashCode() 0x" +Integer.toHexString(key.hashCode())); */ return hash; } /** * Calculates the hashmask of this directory. The hashmask is the * bit mask applied to a hashcode to retain only bits that are * relevant to this directory level. */ int hashMask() { int bits = MAX_CHILDREN - 1; int hashMask = bits << ((MAX_DEPTH - _depth) * BIT_SIZE); /* System.out.println("HashDirectory.hashMask() is: 0x" +Integer.toHexString(hashMask)); */ return hashMask; } /** * Returns an enumeration of the keys contained in this */ Iterator keys() throws IOException { return new HDIterator(true); } /** * Returns an enumeration of the values contained in this */ Iterator values() throws IOException { return new HDIterator(false); } public void writeExternal(DataOutput out) throws IOException { out.writeByte(_depth); if(_depth==0){ LongPacker.packLong(out,size); } int zeroStart = 0; for (int i = 0; i < MAX_CHILDREN; i++) { if (getRecid(i) != 0) { zeroStart = i; break; } } out.write(zeroStart); if (zeroStart == MAX_CHILDREN) return; int zeroEnd = 0; for (int i = MAX_CHILDREN - 1; i >= 0; i--) { if (getRecid(i) != 0) { zeroEnd = i; break; } } out.write(zeroEnd); for (int i = zeroStart; i <= zeroEnd; i++) { LongPacker.packLong(out, getRecid(i)); } } public void readExternal(DataInputOutput in) throws IOException, ClassNotFoundException { _depth = in.readByte(); if(_depth==0) size = LongPacker.unpackLong(in); else size = -1; _children = new long[32][]; int zeroStart = in.readUnsignedByte(); int zeroEnd = in.readUnsignedByte(); for (int i = zeroStart; i <= zeroEnd; i++) { long recid = LongPacker.unpackLong(in); if(recid!=0) putRecid(i,recid); } } public void defrag(DBStore r1, DBStore r2) throws IOException, ClassNotFoundException { for (long[] sub: _children) { if(sub==null) continue; for (long child : sub) { if (child == 0) continue; byte[] data = r1.fetchRaw(child); r2.forceInsert(child, data); Object t = tree.SERIALIZER.deserialize(new DataInputOutput(data)); if (t instanceof HTreeDirectory) { ((HTreeDirectory) t).defrag(r1, r2); } } } } void deleteAllChildren() throws IOException { for(long[] ll : _children){ if(ll!=null){ for(long l:ll ){ if(l!=0){ tree.db.delete(l); } } } } } //////////////////////////////////////////////////////////////////////// // INNER CLASS //////////////////////////////////////////////////////////////////////// /** * Utility class to enumerate keys/values in a HTree */ class HDIterator implements Iterator { /** * True if we're iterating on keys, False if enumerating on values. */ private boolean _iterateKeys; /** * Stacks of directories & last enumerated child position */ private ArrayList _dirStack; private ArrayList _childStack; /** * Current HashDirectory in the hierarchy */ private HTreeDirectory _dir; /** * Current child position */ private int _child; /** * Current bucket iterator */ private Iterator _iter; private A next; /** * last item returned in next(), is used to remove() last item */ private A last; private int expectedModCount; /** * Construct an iterator on this directory. * * @param iterateKeys True if iteration supplies keys, False * if iterateKeys supplies values. 
*/ HDIterator(boolean iterateKeys) throws IOException { _dirStack = new ArrayList(); _childStack = new ArrayList(); _dir = HTreeDirectory.this; _child = -1; _iterateKeys = iterateKeys; expectedModCount = tree.modCount; prepareNext(); next = next2(); } /** * Returns the next object. */ public A next2() { A next = null; if (_iter != null && _iter.hasNext()) { next = _iter.next(); } else { try { prepareNext(); } catch (IOException except) { throw new IOError(except); } if (_iter != null && _iter.hasNext()) { return next2(); } } return next; } /** * Prepare internal state so we can answer hasMoreElements *

* Actually, this code prepares an Enumeration on the next * Bucket to enumerate. If no following bucket is found, * the next Enumeration is set to null. */ private void prepareNext() throws IOException { long child_recid = 0; // get next bucket/directory to enumerate do { _child++; if (_child >= MAX_CHILDREN) { if (_dirStack.isEmpty()) { // no more directory in the stack, we're finished return; } // try next node _dir = (HTreeDirectory) _dirStack.remove(_dirStack.size() - 1); _child = ((Integer) _childStack.remove(_childStack.size() - 1)).intValue(); continue; } child_recid = _dir.getRecid(_child); } while (child_recid == 0); if (child_recid == 0) { throw new Error("child_recid cannot be 0"); } Object node = tree.db.fetch(child_recid, tree.SERIALIZER); // System.out.println("HDEnumeration.get() child is : "+node); if (node instanceof HTreeDirectory) { // save current position _dirStack.add(_dir); _childStack.add(new Integer(_child)); _dir = (HTreeDirectory) node; _child = -1; // recurse into _dir.setPersistenceContext(child_recid); prepareNext(); } else { // node is a bucket HTreeBucket bucket = (HTreeBucket) node; if (_iterateKeys) { ArrayList keys2 = bucket.getKeys(); _iter = keys2.iterator(); } else { _iter = bucket.getValues().iterator(); } } } public boolean hasNext() { return next != null; } public A next() { if (next == null) throw new NoSuchElementException(); if (expectedModCount != tree.modCount) throw new ConcurrentModificationException(); last = next; next = next2(); return last; } public void remove() { if (last == null) throw new IllegalStateException(); if (expectedModCount != tree.modCount) throw new ConcurrentModificationException(); //TODO current delete behaviour may change node layout. INVESTIGATE if this can happen! tree.remove(last); last = null; expectedModCount++; } } } ================================================ FILE: src/main/java/org/apache/jdbm/HTreeSet.java ================================================ package org.apache.jdbm; import java.util.AbstractSet; import java.util.Iterator; /** * Wrapper for HTree to implement java.util.Map interface */ class HTreeSet extends AbstractSet { final HTree map; HTreeSet(HTree map) { this.map = map; } public Iterator iterator() { return map.keySet().iterator(); } public int size() { return map.size(); } public boolean isEmpty() { return map.isEmpty(); } public boolean contains(Object o) { return map.containsKey(o); } public boolean add(E e) { return map.put(e, Utils.EMPTY_STRING) == null; } public boolean remove(Object o) { return map.remove(o) == Utils.EMPTY_STRING; } public void clear() { map.clear(); } } ================================================ FILE: src/main/java/org/apache/jdbm/LinkedList2.java ================================================ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.jdbm; import java.io.*; import java.util.*; import java.util.concurrent.locks.ReentrantReadWriteLock; /** * LinkedList2 which stores its nodes on disk. * * @author Jan Kotek */ class LinkedList2 extends AbstractSequentialList { private DBAbstract db; final long rootRecid; /** size limit, is not currently used, but needs to be here for future compatibility. * Zero means no limit. */ long sizeLimit = 0; private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); static final class Root{ long first; long last; long size; } private static final Serializer ROOT_SERIALIZER= new Serializer(){ public void serialize(DataOutput out, Root obj) throws IOException { LongPacker.packLong(out,obj.first); LongPacker.packLong(out,obj.last); LongPacker.packLong(out,obj.size); } public Root deserialize(DataInput in) throws IOException, ClassNotFoundException { Root r = new Root(); r.first = LongPacker.unpackLong(in); r.last = LongPacker.unpackLong(in); r.size = LongPacker.unpackLong(in); return r; } }; private Serializer valueSerializer; /** * indicates that entry values should not be loaded during deserialization, used during defragmentation */ protected boolean loadValues = true; /** constructor used for deserialization */ LinkedList2(DBAbstract db,long rootRecid, Serializer valueSerializer) { this.db = db; this.rootRecid = rootRecid; this.valueSerializer = valueSerializer; } /** constructor used to create new empty list*/ LinkedList2(DBAbstract db, Serializer valueSerializer) throws IOException { this.db = db; if (valueSerializer != null && !(valueSerializer instanceof Serializable)) throw new IllegalArgumentException("Serializer does not implement Serializable"); this.valueSerializer = valueSerializer; //create root this.rootRecid = db.insert(new Root(), ROOT_SERIALIZER,false); } void setPersistenceContext(DBAbstract db) { this.db = db; } public ListIterator listIterator(int index) { lock.readLock().lock(); try{ Root r = getRoot(); if (index < 0 || index > r.size) throw new IndexOutOfBoundsException(); Iter iter = new Iter(); iter.next = r.first; //scroll to requested position //TODO scroll from end, if beyond half for (int i = 0; i < index; i++) { iter.next(); } return iter; }finally { lock.readLock().unlock(); } } Root getRoot(){ //expect that caller already holds lock try { return db.fetch(rootRecid,ROOT_SERIALIZER); } catch (IOException e) { throw new IOError(e); } } public int size() { lock.readLock().lock(); try{ return (int) getRoot().size; }finally { lock.readLock().unlock(); } } public Iterator descendingIterator() { return null; //To change body of implemented methods use File | Settings | File Templates. 
} public boolean add(Object value) { lock.writeLock().lock(); try { Root r = getRoot(); Entry e = new Entry(r.last, 0, value); long recid = db.insert(e, entrySerializer,false); //update old last Entry to point to new record if (r.last != 0) { Entry oldLast = db.fetch(r.last, entrySerializer); if (oldLast.next != 0) throw new Error(); oldLast.next = recid; db.update(r.last, oldLast, entrySerializer); } //update linked list r.last = recid; if (r.first == 0) r.first = recid; r.size++; db.update(rootRecid, r, ROOT_SERIALIZER); modCount++; return true; } catch (IOException e) { throw new IOError(e); }finally { lock.writeLock().unlock(); } } private Entry fetch(long recid) { lock.readLock().lock(); try { return db.fetch(recid, entrySerializer); } catch (IOException e) { throw new IOError(e); }finally { lock.readLock().unlock(); } } /** * called from Serialization object */ static LinkedList2 deserialize(DataInput is, Serialization ser) throws IOException, ClassNotFoundException { long rootrecid = LongPacker.unpackLong(is); long sizeLimit = LongPacker.unpackLong(is); if(sizeLimit!=0) throw new InternalError("LinkedList.sizeLimit not supported in this JDBM version"); Serializer serializer = (Serializer) ser.deserialize(is); return new LinkedList2(ser.db,rootrecid, serializer); } void serialize(DataOutput out) throws IOException { LongPacker.packLong(out, rootRecid); LongPacker.packLong(out, sizeLimit); db.defaultSerializer().serialize(out, valueSerializer); } private final Serializer entrySerializer = new Serializer() { public void serialize(DataOutput out, Entry e) throws IOException { LongPacker.packLong(out, e.prev); LongPacker.packLong(out, e.next); if (valueSerializer != null) valueSerializer.serialize(out, (E) e.value); else db.defaultSerializer().serialize(out, e.value); } public Entry deserialize(DataInput in) throws IOException, ClassNotFoundException { long prev = LongPacker.unpackLong(in); long next = LongPacker.unpackLong(in); Object value = null; if (loadValues) value = valueSerializer == null ? 
db.defaultSerializer().deserialize(in) : valueSerializer.deserialize(in); return new LinkedList2.Entry(prev, next, value); } }; static class Entry { long prev = 0; long next = 0; E value; public Entry(long prev, long next, E value) { this.prev = prev; this.next = next; this.value = value; } } private final class Iter implements ListIterator { private int expectedModCount = modCount; private int index = 0; private long prev = 0; private long next = 0; private byte lastOper = 0; public boolean hasNext() { return next != 0; } public E next() { if (next == 0) throw new NoSuchElementException(); checkForComodification(); Entry e = fetch(next); prev = next; next = e.next; index++; lastOper = +1; return e.value; } public boolean hasPrevious() { return prev != 0; } public E previous() { checkForComodification(); Entry e = fetch(prev); next = prev; prev = e.prev; index--; lastOper = -1; return e.value; } public int nextIndex() { return index; } public int previousIndex() { return index - 1; } public void remove() { checkForComodification(); lock.writeLock().lock(); try { if (lastOper == 1) { //last operation was next() so remove previous element lastOper = 0; Entry p = db.fetch(prev, entrySerializer); //update entry before previous if (p.prev != 0) { Entry pp = db.fetch(p.prev, entrySerializer); pp.next = p.next; db.update(p.prev, pp, entrySerializer); } //update entry after next if (p.next != 0) { Entry pn = db.fetch(p.next, entrySerializer); pn.prev = p.prev; db.update(p.next, pn, entrySerializer); } //remove old record from db db.delete(prev); //update list Root r = getRoot(); if (r.first == prev) r.first = next; if (r.last == prev) r.last = next; r.size--; db.update(rootRecid, r,ROOT_SERIALIZER); modCount++; expectedModCount++; //update iterator prev = p.prev; } else if (lastOper == -1) { //last operation was prev() so remove next element lastOper = 0; Entry n = db.fetch(next, entrySerializer); //update entry before next if (n.prev != 0) { Entry pp = db.fetch(n.prev, entrySerializer); pp.next = n.next; db.update(n.prev, pp, entrySerializer); } //update entry after previous if (n.next != 0) { Entry pn = db.fetch(n.next, entrySerializer); pn.prev = n.prev; db.update(n.next, pn, entrySerializer); } //remove old record from db db.delete(next); //update list Root r = getRoot(); if (r.last == next) r.last = prev; if (r.first == next) r.first = prev; r.size--; db.update(rootRecid, r,ROOT_SERIALIZER); modCount++; expectedModCount++; //update iterator next = n.next; } else throw new IllegalStateException(); } catch (IOException e) { throw new IOError(e); }finally { lock.writeLock().unlock(); } } public void set(E value) { checkForComodification(); lock.writeLock().lock(); try { if (lastOper == 1) { //last operation was next(), so update previous item lastOper = 0; Entry n = db.fetch(prev, entrySerializer); n.value = value; db.update(prev, n, entrySerializer); } else if (lastOper == -1) { //last operation was prev() so update next item lastOper = 0; Entry n = db.fetch(next, entrySerializer); n.value = value; db.update(next, n, entrySerializer); } else throw new IllegalStateException(); } catch (IOException e) { throw new IOError(e); }finally { lock.writeLock().unlock(); } } public void add(E value) { checkForComodification(); //use more efficient method if possible if (next == 0) { LinkedList2.this.add(value); expectedModCount++; return; } lock.writeLock().lock(); try { //insert new entry Entry e = new Entry(prev, next, value); long recid = db.insert(e, entrySerializer,false); //update previous entry if 
(prev != 0) { Entry p = db.fetch(prev, entrySerializer); if (p.next != next) throw new Error(); p.next = recid; db.update(prev, p, entrySerializer); } //update next entry Entry n = fetch(next); if (n.prev != prev) throw new Error(); n.prev = recid; db.update(next, n, entrySerializer); //update List Root r = getRoot(); r.size++; db.update(rootRecid, r, ROOT_SERIALIZER); //update iterator expectedModCount++; modCount++; prev = recid; } catch (IOException e) { throw new IOError(e); }finally { lock.writeLock().unlock(); } } final void checkForComodification() { if (modCount != expectedModCount) throw new ConcurrentModificationException(); } } /** * Copyes collection from one db to other, while keeping logical recids unchanged */ static void defrag(long recid, DBStore r1, DBStore r2) throws IOException { try { //move linked list itself byte[] data = r1.fetchRaw(recid); r2.forceInsert(recid, data); DataInputOutput in = new DataInputOutput(); in.reset(data); LinkedList2 l = (LinkedList2) r1.defaultSerializer().deserialize(in); l.loadValues = false; //move linkedlist root if(l.rootRecid == 0) //empty list, done return; data = r1.fetchRaw(l.rootRecid); r2.forceInsert(l.rootRecid, data); in.reset(data); Root r = ROOT_SERIALIZER.deserialize(in); //move all other nodes in linked list long current = r.first; while (current != 0) { data = r1.fetchRaw(current); in.reset(data); r2.forceInsert(current, data); Entry e = (Entry) l.entrySerializer.deserialize(in); current = e.next; } } catch (ClassNotFoundException e) { throw new IOError(e); } } } ================================================ FILE: src/main/java/org/apache/jdbm/LogicalRowIdManager.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.IOException; import java.util.Arrays; /** * This class manages the linked lists of logical rowid pages. 
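 * <p>
 * Decoding sketch (it reuses the exact expressions found in fetch()/update()/delete() below;
 * Storage.PAGE_SIZE_SHIFT and Storage.OFFSET_MASK are defined elsewhere in this repository):
 * <pre>
 *   long pageId  = -(logicalrowid >>> Storage.PAGE_SIZE_SHIFT);  // translation page holding the slot
 *   short offset = (short) (logicalrowid & Storage.OFFSET_MASK); // slot offset inside that page
 *   // the slot stores the physical rowid, or 0 when the logical rowid is unused
 * </pre>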
*/ final class LogicalRowIdManager { // our record file and associated page manager private final PageFile file; private final PageManager pageman; static final short ELEMS_PER_PAGE = (short) ((Storage.PAGE_SIZE - Magic.PAGE_HEADER_SIZE) / Magic.PhysicalRowId_SIZE); private long[] freeRecordsInTransRowid = new long[4]; private int freeRecordsInTransSize = 0; /** number of free logical rowids on logical free page, is SHORT*/ static final int OFFSET_FREE_COUNT = Magic.PAGE_HEADER_SIZE; static final int FREE_HEADER_SIZE = Magic.PAGE_HEADER_SIZE + Magic.SZ_SHORT; /** maximal number of free logical per page */ static final int FREE_RECORDS_PER_PAGE = (Storage.PAGE_SIZE -FREE_HEADER_SIZE)/6; /** * Creates a log rowid manager using the indicated record file and page manager */ LogicalRowIdManager(PageFile file, PageManager pageman) throws IOException { this.file = file; this.pageman = pageman; } /** * Creates a new logical rowid pointing to the indicated physical id * * @param physloc physical location to point to * @return logical recid */ long insert(final long physloc) throws IOException { // check whether there's a free rowid to reuse long retval = getFreeSlot(); if (retval == 0) { // no. This means that we bootstrap things by allocating // a new translation page and freeing all the rowids on it. long firstPage = pageman.allocate(Magic.TRANSLATION_PAGE); short curOffset = Magic.PAGE_HEADER_SIZE; for (int i = 0; i < ELEMS_PER_PAGE; i++) { putFreeSlot(((-firstPage) << Storage.PAGE_SIZE_SHIFT) + (long) curOffset); curOffset += Magic.PhysicalRowId_SIZE; } retval = getFreeSlot(); if (retval == 0) { throw new Error("couldn't obtain free translation"); } } // write the translation. update(retval, physloc); return retval; } /** * Insert at forced location, use only for defragmentation !! * * @param logicalRowId * @param physLoc * @throws IOException */ void forceInsert(final long logicalRowId, final long physLoc) throws IOException { if (fetch(logicalRowId) != 0) throw new Error("can not forceInsert, record already exists: " + logicalRowId); update(logicalRowId, physLoc); } /** * Releases the indicated logical rowid. 
*/ void delete(final long logicalrowid) throws IOException { //zero out old location, is needed for defragmentation final long pageId = -(logicalrowid>>> Storage.PAGE_SIZE_SHIFT); final PageIo xlatPage = file.get(pageId); xlatPage.pageHeaderSetLocation((short) (logicalrowid & Storage.OFFSET_MASK), 0); file.release(pageId, true); putFreeSlot(logicalrowid); } /** * Updates the mapping * * @param logicalrowid The logical rowid * @param physloc The physical rowid */ void update(final long logicalrowid, final long physloc) throws IOException { final long pageId = -(logicalrowid>>> Storage.PAGE_SIZE_SHIFT); final PageIo xlatPage = file.get(pageId); xlatPage.pageHeaderSetLocation((short) (logicalrowid & Storage.OFFSET_MASK), physloc); file.release(pageId, true); } /** * Returns a mapping * * @param logicalrowid The logical rowid * @return The physical rowid, 0 if does not exist */ long fetch(long logicalrowid) throws IOException { final long pageId = -(logicalrowid>>> Storage.PAGE_SIZE_SHIFT); final long last = pageman.getLast(Magic.TRANSLATION_PAGE); if (last - 1 > pageId) return 0; final short offset = (short) (logicalrowid & Storage.OFFSET_MASK); final PageIo xlatPage = file.get(pageId); final long ret = xlatPage.pageHeaderGetLocation(offset); file.release(pageId, false); return ret; } void commit() throws IOException { if(freeRecordsInTransSize==0) return; long freeRecPageId = pageman.getLast(Magic.FREELOGIDS_PAGE); if(freeRecPageId == 0){ //allocate new freeRecPageId = pageman.allocate(Magic.FREELOGIDS_PAGE); } PageIo freeRecPage = file.get(freeRecPageId); //write all uncommited free records for(int rowPos = 0;rowPos128) freeRecordsInTransRowid = new long[4]; freeRecordsInTransSize = 0; } void rollback() throws IOException { clearFreeRecidsInTransaction(); } /** * Returns a free Logical rowid, or * 0 if nothing was found. */ long getFreeSlot() throws IOException { if (freeRecordsInTransSize != 0) { return freeRecordsInTransRowid[--freeRecordsInTransSize]; } final long logicFreePageId = pageman.getLast(Magic.FREELOGIDS_PAGE); if(logicFreePageId == 0) { return 0; } PageIo logicFreePage = file.get(logicFreePageId); short recCount = logicFreePage.readShort(OFFSET_FREE_COUNT); if(recCount <= 0){ throw new InternalError(); } final int offset = (recCount -1) *6 + FREE_HEADER_SIZE; final long ret = logicFreePage.readSixByteLong(offset); recCount--; if(recCount>0){ //decrease counter and zero out old record logicFreePage.writeSixByteLong(offset,0); logicFreePage.writeShort(OFFSET_FREE_COUNT, recCount); file.release(logicFreePage); }else{ //release this page file.release(logicFreePage); pageman.free(Magic.FREELOGIDS_PAGE,logicFreePageId); } return ret; } /** * Puts the indicated rowid on the free list */ void putFreeSlot(long rowid) throws IOException { //ensure capacity if(freeRecordsInTransSize == freeRecordsInTransRowid.length) freeRecordsInTransRowid = Arrays.copyOf(freeRecordsInTransRowid, freeRecordsInTransRowid.length * 4); //add record and increase size freeRecordsInTransRowid[freeRecordsInTransSize]=rowid; freeRecordsInTransSize++; } } ================================================ FILE: src/main/java/org/apache/jdbm/LongHashMap.java ================================================ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. 
* The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.jdbm; import java.io.Serializable; import java.util.Arrays; import java.util.Iterator; import java.util.NoSuchElementException; /** * Hash Map which uses primitive long as key. * Main advantage is that a new instance of Long does not have to be created for each lookup. *

* This code comes from Android, which in turn comes from Apache Harmony. * This class was modified to use primitive longs and stripped down to consume less space. *
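 * <p>
 * Minimal usage sketch (only methods declared in this class are used):
 * <pre>
 *   LongHashMap<String> map = new LongHashMap<String>();
 *   map.put(42L, "answer");      // the long key is never boxed into a Long
 *   String v = map.get(42L);
 *   map.remove(42L);
 * </pre>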

* Author of JDBM modifications: Jan Kotek */ class LongHashMap implements Serializable { private static final long serialVersionUID = 362499999763181265L; private int elementCount; private Entry[] elementData; private final float loadFactor; private int threshold; private int defaultSize = 16; private transient Entry reuseAfterDelete = null; static final class Entry implements Serializable{ private static final long serialVersionUID = 362445231113181265L; Entry next; V value; long key; Entry(long theKey) { this.key = theKey; this.value = null; } } static class HashMapIterator implements Iterator { private int position = 0; boolean canRemove = false; Entry entry; Entry lastEntry; final LongHashMap associatedMap; HashMapIterator(LongHashMap hm) { associatedMap = hm; } public boolean hasNext() { if (entry != null) { return true; } Entry[] elementData = associatedMap.elementData; int length = elementData.length; int newPosition = position; boolean result = false; while (newPosition < length) { if (elementData[newPosition] == null) { newPosition++; } else { result = true; break; } } position = newPosition; return result; } public V next() { if (!hasNext()) { throw new NoSuchElementException(); } Entry result; Entry _entry = entry; if (_entry == null) { result = lastEntry = associatedMap.elementData[position++]; entry = lastEntry.next; } else { if (lastEntry.next != _entry) { lastEntry = lastEntry.next; } result = _entry; entry = _entry.next; } canRemove = true; return result.value; } public void remove() { if (!canRemove) { throw new IllegalStateException(); } canRemove = false; if (lastEntry.next == entry) { while (associatedMap.elementData[--position] == null) { // Do nothing } associatedMap.elementData[position] = associatedMap.elementData[position].next; entry = null; } else { lastEntry.next = entry; } if (lastEntry != null) { Entry reuse = lastEntry; lastEntry = null; reuse.key = Long.MIN_VALUE; reuse.value = null; associatedMap.reuseAfterDelete = reuse; } associatedMap.elementCount--; } } @SuppressWarnings("unchecked") private Entry[] newElementArray(int s) { return new Entry[s]; } /** * Constructs a new empty {@code HashMap} instance. * * @since Android 1.0 */ public LongHashMap() { this(16); } /** * Constructs a new {@code HashMap} instance with the specified capacity. * * @param capacity the initial capacity of this hash map. * @throws IllegalArgumentException when the capacity is less than zero. * @since Android 1.0 */ public LongHashMap(int capacity) { defaultSize = capacity; if (capacity >= 0) { elementCount = 0; elementData = newElementArray(capacity == 0 ? 1 : capacity); loadFactor = 0.75f; // Default load factor of 0.75 computeMaxSize(); } else { throw new IllegalArgumentException(); } } // BEGIN android-changed /** * Removes all mappings from this hash map, leaving it empty. * * @see #isEmpty * @see #size * @since Android 1.0 */ public void clear() { if (elementCount > 0) { elementCount = 0; } if(elementData.length>1024 && elementData.length>defaultSize) elementData = new Entry[defaultSize]; else Arrays.fill(elementData, null); computeMaxSize(); } // END android-changed /** * Returns a shallow copy of this map. * * @return a shallow copy of this map. * @since Android 1.0 */ private void computeMaxSize() { threshold = (int) (elementData.length * loadFactor); } /** * Returns the value of the mapping with the specified key. * * @param key the key. * @return the value of the mapping with the specified key, or {@code null} * if no mapping for the specified key is found. 
* @since Android 1.0 */ public V get(final long key) { final int hash = powerHash(key); final int index = (hash & 0x7FFFFFFF) % elementData.length; //find non null entry Entry m = elementData[index]; while (m != null) { if (key == m.key) return m.value; m = m.next; } return null; } /** * Returns whether this map is empty. * * @return {@code true} if this map has no elements, {@code false} * otherwise. * @see #size() * @since Android 1.0 */ public boolean isEmpty() { return elementCount == 0; } /** * @return iterator over keys */ // public Iterator keyIterator(){ // return new HashMapIterator( // new MapEntry.Type() { // public K get(Entry entry) { // return entry.key; // } // }, HashMap.this); // // } /** * Maps the specified key to the specified value. * * @param key the key. * @param value the value. * @return the value of any previous mapping with the specified key or * {@code null} if there was no such mapping. * @since Android 1.0 */ public V put(final long key, final V value) { int hash = powerHash(key); int index = (hash & 0x7FFFFFFF) % elementData.length; //find non null entry Entry entry = elementData[index]; while (entry != null && key != entry.key) { entry = entry.next; } if (entry == null) { if (++elementCount > threshold) { rehash(); index = (hash & 0x7FFFFFFF) % elementData.length; } entry = createHashedEntry(key, index); } V result = entry.value; entry.value = value; return result; } Entry createHashedEntry(final long key, final int index) { Entry entry = reuseAfterDelete; if (entry == null) { entry = new Entry(key); } else { reuseAfterDelete = null; entry.key = key; entry.value = null; } entry.next = elementData[index]; elementData[index] = entry; return entry; } void rehash(final int capacity) { int length = (capacity == 0 ? 1 : capacity << 1); Entry[] newData = newElementArray(length); for (int i = 0; i < elementData.length; i++) { Entry entry = elementData[i]; while (entry != null) { int index = ((int) powerHash(entry.key) & 0x7FFFFFFF) % length; Entry next = entry.next; entry.next = newData[index]; newData[index] = entry; entry = next; } } elementData = newData; computeMaxSize(); } void rehash() { rehash(elementData.length); } /** * Removes the mapping with the specified key from this map. * * @param key the key of the mapping to remove. * @return the value of the removed mapping or {@code null} if no mapping * for the specified key was found. * @since Android 1.0 */ public V remove(final long key) { Entry entry = removeEntry(key); if (entry == null) return null; V ret = entry.value; entry.value = null; entry.key = Long.MIN_VALUE; reuseAfterDelete = entry; return ret; } Entry removeEntry(final long key) { Entry last = null; final int hash = powerHash(key); final int index = (hash & 0x7FFFFFFF) % elementData.length; Entry entry = elementData[index]; while (true) { if (entry == null) { return null; } if (key == entry.key) { if (last == null) { elementData[index] = entry.next; } else { last.next = entry.next; } elementCount--; return entry; } last = entry; entry = entry.next; } } /** * Returns the number of elements in this map. * * @return the number of elements in this map. 
* @since Android 1.0 */ public int size() { return elementCount; } /** * @returns iterator over values in map */ public Iterator valuesIterator() { return new HashMapIterator(this); } static final private int powerHash(final long key){ int h = (int)(key ^ (key >>> 32)); h ^= (h >>> 20) ^ (h >>> 12); return h ^ (h >>> 7) ^ (h >>> 4); } } ================================================ FILE: src/main/java/org/apache/jdbm/LongPacker.java ================================================ /* Copyright (c) 2008, Nathan Sweet All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Esoteric Software nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package org.apache.jdbm; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; /** * Packing utility for non-negative long and values. *
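* The encoding is a base-128 varint, least significant 7 bits first, with the high bit
* of each byte flagging that another byte follows. As a worked example (added for
* illustration, assuming a DataOutput {@code out} and a DataInput {@code in} positioned
* on the same bytes): the value 300 packs into the two bytes 0xAC (low bits 0x2C plus
* the 0x80 continuation flag) followed by 0x02.
* <pre>{@code
*     LongPacker.packLong(out, 300L);        // writes 0xAC 0x02
*     long v = LongPacker.unpackLong(in);    // reads back 300
* }</pre>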

* Originally developed for Kryo by Nathan Sweet. * Modified for JDBM by Jan Kotek */ public final class LongPacker { /** * Pack non-negative long into output stream. * It will occupy 1-10 bytes depending on value (lower values occupy smaller space) * * @param os * @param value * @throws IOException */ static public void packLong(DataOutput os, long value) throws IOException { if (value < 0) { throw new IllegalArgumentException("negative value: v=" + value); } while ((value & ~0x7FL) != 0) { os.write((((int) value & 0x7F) | 0x80)); value >>>= 7; } os.write((byte) value); } /** * Unpack positive long value from the input stream. * * @param is The input stream. * @return The long value. * @throws java.io.IOException */ static public long unpackLong(DataInput is) throws IOException { long result = 0; for (int offset = 0; offset < 64; offset += 7) { long b = is.readUnsignedByte(); result |= (b & 0x7F) << offset; if ((b & 0x80) == 0) { return result; } } throw new Error("Malformed long."); } /** * Pack non-negative long into output stream. * It will occupy 1-5 bytes depending on value (lower values occupy smaller space) * * @param os * @param value * @throws IOException */ static public void packInt(DataOutput os, int value) throws IOException { if (value < 0) { throw new IllegalArgumentException("negative value: v=" + value); } while ((value & ~0x7F) != 0) { os.write(((value & 0x7F) | 0x80)); value >>>= 7; } os.write((byte) value); } static public int unpackInt(DataInput is) throws IOException { for (int offset = 0, result = 0; offset < 32; offset += 7) { int b = is.readUnsignedByte(); result |= (b & 0x7F) << offset; if ((b & 0x80) == 0) { return result; } } throw new Error("Malformed integer."); } } ================================================ FILE: src/main/java/org/apache/jdbm/Magic.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; /** * This interface contains magic cookies. */ interface Magic { /** * Magic cookie at start of file */ short FILE_HEADER = 0x1350; /** * Magic for pages. They're offset by the page type magic codes. */ short PAGE_MAGIC = 0x1351; /** * Magics for pages in certain lists. 
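* These values are offsets added to PAGE_MAGIC when a page header is stamped
* (see PageIo.pageHeaderSetType); for instance a used page carries
* PAGE_MAGIC + USED_PAGE = 0x1351 + 1 = 0x1352 in its header.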
*/ short FREE_PAGE = 0; short USED_PAGE = 1; short TRANSLATION_PAGE = 2; short FREELOGIDS_PAGE = 3; short FREEPHYSIDS_PAGE = 4; short FREEPHYSIDS_ROOT_PAGE = 5; /** * Number of lists in a file */ short NLISTS = 6; /** * Magic for transaction file */ short LOGFILE_HEADER = 0x1360; /** * Size of an externalized byte */ short SZ_BYTE = 1; /** * Size of an externalized short */ short SZ_SHORT = 2; /** * Size of an externalized int */ short SZ_INT = 4; /** * Size of an externalized long */ short SZ_LONG = 8; /** * size of three byte integer */ short SZ_SIX_BYTE_LONG = 6; /**offsets in file header (zero page in file)*/ short FILE_HEADER_O_MAGIC = 0; // short magic short FILE_HEADER_O_LISTS = Magic.SZ_SHORT; // long[2*NLISTS] int FILE_HEADER_O_ROOTS = FILE_HEADER_O_LISTS + (Magic.NLISTS * 2 * Magic.SZ_LONG); /** * The number of "root" rowids available in the file. */ int FILE_HEADER_NROOTS = 16; short PAGE_HEADER_O_MAGIC = 0; // short magic short PAGE_HEADER_O_NEXT = Magic.SZ_SHORT; short PAGE_HEADER_O_PREV = PAGE_HEADER_O_NEXT + Magic.SZ_SIX_BYTE_LONG; short PAGE_HEADER_SIZE = PAGE_HEADER_O_PREV + Magic.SZ_SIX_BYTE_LONG; short PhysicalRowId_O_LOCATION = 0; // long page // short PhysicalRowId_O_OFFSET = Magic.SZ_SIX_BYTE_LONG; // short offset int PhysicalRowId_SIZE = Magic.SZ_SIX_BYTE_LONG; short DATA_PAGE_O_FIRST = PAGE_HEADER_SIZE; // short firstrowid short DATA_PAGE_O_DATA = (short) (DATA_PAGE_O_FIRST + Magic.SZ_SHORT); short DATA_PER_PAGE = (short) (Storage.PAGE_SIZE - DATA_PAGE_O_DATA); } ================================================ FILE: src/main/java/org/apache/jdbm/ObjectInputStream2.java ================================================ package org.apache.jdbm; import java.io.DataInputStream; import java.io.IOException; import java.io.InputStream; import java.io.ObjectInput; import java.util.ArrayList; /** * An alternative to java.io.ObjectInputStream which uses more efficient serialization */ public class ObjectInputStream2 extends DataInputStream implements ObjectInput { public ObjectInputStream2(InputStream in) { super(in); } public Object readObject() throws ClassNotFoundException, IOException { //first read class data ArrayList info = SerialClassInfo.serializer.deserialize(this); Serialization ser = new Serialization(null,0,info); return ser.deserialize(this); } } ================================================ FILE: src/main/java/org/apache/jdbm/ObjectOutputStream2.java ================================================ package org.apache.jdbm; import java.io.*; import java.util.ArrayList; /** * An alternative to java.io.ObjectOutputStream which uses more efficient serialization */ public class ObjectOutputStream2 extends DataOutputStream implements ObjectOutput { public ObjectOutputStream2(OutputStream out) { super(out); } public void writeObject(Object obj) throws IOException { ArrayList registered = new ArrayList(); Serialization ser = new Serialization(null,0,registered); byte[] data = ser.serialize(obj); //write class info first SerialClassInfo.serializer.serialize(this, registered); //and write data write(data); } } ================================================ FILE: src/main/java/org/apache/jdbm/PageFile.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import javax.crypto.Cipher; import java.io.IOError; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Iterator; /** * This class represents a random access file as a set of fixed size * records. Each record has a physical record number, and records are * cached in order to improve access. *

* The set of dirty records on the in-use list constitutes a transaction. * Later on, these records are handed to the transaction manager, which logs them for recovery. *
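* A rough sketch of the intended call pattern (illustrative only; {@code file},
* {@code pageId} and the offset are hypothetical, and error handling is omitted):
* <pre>{@code
*     PageIo page = file.get(pageId);                      // page moves to the in-use list
*     page.writeShort(Magic.DATA_PAGE_O_DATA, (short) 1);  // any write marks the page dirty
*     file.release(pageId, true);                          // page moves to the dirty list
*     file.commit();                                       // flush dirty pages (via txn log if enabled)
* }</pre>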

* PageFile is splited between more files, each with max size 1GB. */ final class PageFile { final PageTransactionManager txnMgr; /** * Pages currently locked for read/update ops. When released the page goes * to the dirty or clean list, depending on a flag. The file header page is * normally locked plus the page that is currently being read or modified. * * @see PageIo#isDirty() */ private final LongHashMap inUse = new LongHashMap(); /** * Pages whose state is dirty. */ private final LongHashMap dirty = new LongHashMap(); /** * Pages in a historical transaction(s) that have been written * onto the log but which have not yet been committed to the database. */ private final LongHashMap inTxn = new LongHashMap(); // transactions disabled? final boolean transactionsDisabled; /** * A array of clean data to wipe clean pages. */ static final byte[] CLEAN_DATA = new byte[Storage.PAGE_SIZE]; final Storage storage; private Cipher cipherOut; private Cipher cipherIn; /** * Creates a new object on the indicated filename. The file is * opened in read/write mode. * * @param fileName the name of the file to open or create, without * an extension. * @throws IOException whenever the creation of the underlying * RandomAccessFile throws it. */ PageFile(String fileName, boolean readonly, boolean transactionsDisabled, Cipher cipherIn, Cipher cipherOut, boolean useRandomAccessFile, boolean lockingDisabled) throws IOException { this.cipherIn = cipherIn; this.cipherOut = cipherOut; this.transactionsDisabled = transactionsDisabled; if(fileName == null){ this.storage = new StorageMemory(transactionsDisabled); }else if(DBMaker.isZipFileLocation(fileName)!=null) this.storage = new StorageZip(DBMaker.isZipFileLocation(fileName)); // }else if (fileName.contains("!/")) // this.storage = new StorageZip(fileName); else if(useRandomAccessFile) this.storage = new StorageDisk(fileName,readonly,lockingDisabled); else this.storage = new StorageDiskMapped(fileName,readonly,transactionsDisabled,lockingDisabled); if (this.storage.isReadonly() && !readonly) throw new IllegalArgumentException("This type of storage is readonly, you should call readonly() on DBMaker"); if (!readonly && !transactionsDisabled) { txnMgr = new PageTransactionManager(this, storage, cipherIn, cipherOut); } else { txnMgr = null; } } public PageFile(String filename) throws IOException { this(filename, false, false, null, null,false,false); } /** * Gets a page from the file. The returned byte array is * the in-memory copy of the record, and thus can be written * (and subsequently released with a dirty flag in order to * write the page back). If transactions are disabled, changes * may be written directly * * @param pageId The record number to retrieve. 
*/ PageIo get(long pageId) throws IOException { // try in transaction list, dirty list, free list PageIo node = inTxn.get(pageId); if (node != null) { inTxn.remove(pageId); inUse.put(pageId, node); return node; } node = dirty.get(pageId); if (node != null) { dirty.remove(pageId); inUse.put(pageId, node); return node; } // sanity check: can't be on in use list if (inUse.get(pageId) != null) { throw new Error("double get for page " + pageId); } //read node from file if (cipherOut == null) { node = new PageIo(pageId,storage.read(pageId)); } else { //decrypt if needed ByteBuffer b = storage.read(pageId); byte[] bb; if(b.hasArray()){ bb = b.array(); }else{ bb = new byte[Storage.PAGE_SIZE]; b.position(0); b.get(bb, 0, Storage.PAGE_SIZE); } if (!Utils.allZeros(bb)) try { bb = cipherOut.doFinal(bb); node = new PageIo(pageId, ByteBuffer.wrap(bb)); } catch (Exception e) { throw new IOError(e); }else { node = new PageIo(pageId, ByteBuffer.wrap(PageFile.CLEAN_DATA).asReadOnlyBuffer()); } } inUse.put(pageId, node); node.setClean(); return node; } /** * Releases a page. * * @param pageId The record number to release. * @param isDirty If true, the page was modified since the get(). */ void release(final long pageId, final boolean isDirty) throws IOException { final PageIo page = inUse.remove(pageId); if (!page.isDirty() && isDirty) page.setDirty(); if (page.isDirty()) { dirty.put(pageId, page); } else if (!transactionsDisabled && page.isInTransaction()) { inTxn.put(pageId, page); } } /** * Releases a page. * * @param page The page to release. */ void release(final PageIo page) throws IOException { final long key = page.getPageId(); inUse.remove(key); if (page.isDirty()) { // System.out.println( "Dirty: " + key + page ); dirty.put(key, page); } else if (!transactionsDisabled && page.isInTransaction()) { inTxn.put(key, page); } } /** * Discards a page (will not write the page even if it's dirty) * * @param page The page to discard. */ void discard(PageIo page) { long key = page.getPageId(); inUse.remove(key); } /** * Commits the current transaction by flushing all dirty buffers * to disk. */ void commit() throws IOException { // debugging... if (!inUse.isEmpty() && inUse.size() > 1) { showList(inUse.valuesIterator()); throw new Error("in use list not empty at commit time (" + inUse.size() + ")"); } // System.out.println("committing..."); if (dirty.size() == 0) { // if no dirty pages, skip commit process return; } if (!transactionsDisabled) { txnMgr.start(); } //sort pages by IDs long[] pageIds = new long[dirty.size()]; int c = 0; for (Iterator i = dirty.valuesIterator(); i.hasNext(); ) { pageIds[c] = i.next().getPageId(); c++; } Arrays.sort(pageIds); for (long pageId : pageIds) { PageIo node = dirty.get(pageId); // System.out.println("node " + node + " map size now " + dirty.size()); if (transactionsDisabled) { if(cipherIn !=null) storage.write(node.getPageId(), ByteBuffer.wrap(Utils.encrypt(cipherIn, node.getData()))); else storage.write(node.getPageId(),node.getData()); node.setClean(); } else { txnMgr.add(node); inTxn.put(node.getPageId(), node); } } dirty.clear(); if (!transactionsDisabled) { txnMgr.commit(); } } /** * Rollback the current transaction by discarding all dirty buffers */ void rollback() throws IOException { // debugging... 
if (!inUse.isEmpty()) { showList(inUse.valuesIterator()); throw new Error("in use list not empty at rollback time (" + inUse.size() + ")"); } // System.out.println("rollback..."); dirty.clear(); txnMgr.synchronizeLogFromDisk(); if (!inTxn.isEmpty()) { showList(inTxn.valuesIterator()); throw new Error("in txn list not empty at rollback time (" + inTxn.size() + ")"); } ; } /** * Commits and closes file. */ void close() throws IOException { if (!dirty.isEmpty()) { commit(); } if(!transactionsDisabled && txnMgr!=null){ txnMgr.shutdown(); } if (!inTxn.isEmpty()) { showList(inTxn.valuesIterator()); throw new Error("In transaction not empty"); } // these actually ain't that bad in a production release if (!dirty.isEmpty()) { System.out.println("ERROR: dirty pages at close time"); showList(dirty.valuesIterator()); throw new Error("Dirty pages at close time"); } if (!inUse.isEmpty()) { System.out.println("ERROR: inUse pages at close time"); showList(inUse.valuesIterator()); throw new Error("inUse pages at close time"); } storage.sync(); storage.forceClose(); } /** * Force closing the file and underlying transaction manager. * Used for testing purposed only. */ void forceClose() throws IOException { if(!transactionsDisabled){ txnMgr.forceClose(); } storage.forceClose(); } /** * Prints contents of a list */ private void showList(Iterator i) { int cnt = 0; while (i.hasNext()) { System.out.println("elem " + cnt + ": " + i.next()); cnt++; } } /** * Synchs a node to disk. This is called by the transaction manager's * synchronization code. */ void synch(PageIo node) throws IOException { ByteBuffer data = node.getData(); if (data != null) { if(cipherIn!=null) storage.write(node.getPageId(), ByteBuffer.wrap(Utils.encrypt(cipherIn, data))); else storage.write(node.getPageId(), data); } } /** * Releases a node from the transaction list, if it was sitting * there. */ void releaseFromTransaction(PageIo node) throws IOException { inTxn.remove(node.getPageId()); } /** * Synchronizes the file. */ void sync() throws IOException { storage.sync(); } public int getDirtyPageCount() { return dirty.size(); } public void deleteAllFiles() throws IOException { storage.deleteAllFiles(); } } ================================================ FILE: src/main/java/org/apache/jdbm/PageIo.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import javax.crypto.Cipher; import java.io.*; import java.nio.ByteBuffer; import static org.apache.jdbm.Magic.*; /** * Wraps a page sizes ByteBuffer for reading and writing. *

* The ByteBuffer may be a subview of a larger buffer (i.e. a large buffer mapped over a file). * In this case the ByteBuffer will have its limit, mark and other variables set to restrict its size. *

* For reading, buffers may be shared. For example StorageMemory just returns its pages without copying. * In this case the buffer is marked as 'readonly' and needs to be copied before a write (Copy On Write - COW). * COW is not necessary if transactions are disabled and changes cannot be rolled back. *
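* A small round-trip sketch of the typed accessors (illustrative only; the page id and
* offset are arbitrary):
* <pre>{@code
*     PageIo page = new PageIo(1L, new byte[Storage.PAGE_SIZE]);
*     page.writeSixByteLong(0, 123456789L);   // six-byte signed encoding, sets the dirty flag
*     long v = page.readSixByteLong(0);       // 123456789
* }</pre>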

*/ final class PageIo { private long pageId; private ByteBuffer data; // work area /** buffers contains changes which were not written to disk yet. */ private boolean dirty = false; private int transactionCount = 0; /** * Default constructor for serialization */ public PageIo() { // empty } /** * Constructs a new PageIo instance working on the indicated * buffer. */ PageIo(long pageId, byte[] data) { this.pageId = pageId; this.data = ByteBuffer.wrap(data); } public PageIo(long pageId, ByteBuffer data) { this.pageId = pageId; this.data = data; } /** Frequent reads on direct buffer may be slower then on heap buffer. * This method converts native direct to heap buffer */ void ensureHeapBuffer(){ if(data.isDirect()){ final byte[] bb = new byte[Storage.PAGE_SIZE]; data.get(bb,0,Storage.PAGE_SIZE); data = ByteBuffer.wrap(bb); if(data.isReadOnly()) throw new InternalError(); } } /** * Returns the underlying array */ ByteBuffer getData() { return data; } /** * Returns the page number. */ long getPageId() { return pageId; } /** * Sets the dirty flag */ void setDirty() { dirty = true; if(data.isReadOnly()){ // make copy if needed, so we can write into buffer byte[] buf = new byte[Storage.PAGE_SIZE]; data.get(buf,0,Storage.PAGE_SIZE); data = ByteBuffer.wrap(buf); } } /** * Clears the dirty flag */ void setClean() { dirty = false; } /** * Returns true if the dirty flag is set. */ boolean isDirty() { return dirty; } /** * Returns true if the block is still dirty with respect to the * transaction log. */ boolean isInTransaction() { return transactionCount != 0; } /** * Increments transaction count for this block, to signal that this * block is in the log but not yet in the data file. The method also * takes a snapshot so that the data may be modified in new transactions. */ void incrementTransactionCount() { transactionCount++; } /** * Decrements transaction count for this block, to signal that this * block has been written from the log to the data file. 
*/ void decrementTransactionCount() { transactionCount--; if (transactionCount < 0) throw new Error("transaction count on page " + getPageId() + " below zero!"); } /** * Reads a byte from the indicated position */ public byte readByte(int pos) { return data.get(pos); } /** * Writes a byte to the indicated position */ public void writeByte(int pos, byte value) { setDirty(); data.put(pos,value); } /** * Reads a short from the indicated position */ public short readShort(int pos) { return data.getShort(pos); } /** * Writes a short to the indicated position */ public void writeShort(int pos, short value) { setDirty(); data.putShort(pos,value); } /** * Reads an int from the indicated position */ public int readInt(int pos) { return data.getInt(pos); } /** * Writes an int to the indicated position */ public void writeInt(int pos, int value) { setDirty(); data.putInt(pos,value); } /** * Reads a long from the indicated position */ public long readLong(int pos) { return data.getLong(pos); } /** * Writes a long to the indicated position */ public void writeLong(int pos, long value) { setDirty(); data.putLong(pos,value); } /** * Reads a long from the indicated position */ public long readSixByteLong(int pos) { long ret = ((long) (data.get(pos + 0) & 0x7f) << 40) | ((long) (data.get(pos + 1) & 0xff) << 32) | ((long) (data.get(pos + 2) & 0xff) << 24) | ((long) (data.get(pos + 3) & 0xff) << 16) | ((long) (data.get(pos + 4) & 0xff) << 8) | ((long) (data.get(pos + 5) & 0xff) << 0); if((data.get(pos + 0) & 0x80) != 0) return -ret; else return ret; } /** * Writes a long to the indicated position */ public void writeSixByteLong(int pos, long value) { // if(value<0) throw new IllegalArgumentException(); // if(value >> (6*8)!=0) // throw new IllegalArgumentException("does not fit"); int negativeBit = 0; if(value<0){ value = -value; negativeBit = 0x80; } setDirty(); data.put(pos + 0,(byte) ((0x7f & (value >> 40)) | negativeBit)); data.put(pos + 1, (byte) (0xff & (value >> 32))); data.put(pos + 2, (byte) (0xff & (value >> 24))); data.put(pos + 3, (byte) (0xff & (value >> 16))); data.put(pos + 4, (byte) (0xff & (value >> 8))); data.put(pos + 5, (byte) (0xff & (value >> 0))); } // overrides java.lang.Object public String toString() { return "PageIo(" + pageId + "," + dirty +")"; } public void readExternal(DataInputStream in, Cipher cipherOut) throws IOException { pageId = in.readLong(); byte[] data2 = new byte[Storage.PAGE_SIZE]; in.readFully(data2); if (cipherOut == null || Utils.allZeros(data2)) data = ByteBuffer.wrap(data2); else try { data = ByteBuffer.wrap(cipherOut.doFinal(data2)); } catch (Exception e) { throw new IOError(e); } } public void writeExternal(DataOutput out, Cipher cipherIn) throws IOException { out.writeLong(pageId); out.write(Utils.encrypt(cipherIn, data.array())); } public byte[] getByteArray() { if ( data.hasArray()) return data.array(); byte[] d= new byte[Storage.PAGE_SIZE]; data.rewind(); data.get(d,0,Storage.PAGE_SIZE); return d; } public void writeByteArray(byte[] buf, int srcOffset, int offset, int length) { setDirty(); data.rewind(); data.position(offset); data.put(buf,srcOffset,length); } public void fileHeaderCheckHead(boolean isNew){ if (isNew) writeShort(FILE_HEADER_O_MAGIC, Magic.FILE_HEADER); else{ short magic = readShort(FILE_HEADER_O_MAGIC); if(magic!=FILE_HEADER) throw new Error("CRITICAL: file header magic not OK " + magic); } } /** * Returns the first page of the indicated list */ long fileHeaderGetFirstOf(int list) { return readLong(fileHeaderOffsetOfFirst(list)); } /** * 
Sets the first page of the indicated list */ void fileHeaderSetFirstOf(int list, long value) { writeLong(fileHeaderOffsetOfFirst(list), value); } /** * Returns the last page of the indicated list */ long fileHeaderGetLastOf(int list) { return readLong(fileHeaderOffsetOfLast(list)); } /** * Sets the last page of the indicated list */ void fileHeaderSetLastOf(int list, long value) { writeLong(fileHeaderOffsetOfLast(list), value); } /** * Returns the offset of the "first" page of the indicated list */ private short fileHeaderOffsetOfFirst(int list) { return (short) (FILE_HEADER_O_LISTS + (2 * Magic.SZ_LONG * list)); } /** * Returns the offset of the "last" page of the indicated list */ private short fileHeaderOffsetOfLast(int list) { return (short) (fileHeaderOffsetOfFirst(list) + Magic.SZ_LONG); } /** * Returns the indicated root rowid. A root rowid is a special rowid * that needs to be kept between sessions. It could conceivably be * stored in a special file, but as a large amount of space in the * page header is wasted anyway, it's more useful to store it where * it belongs. * */ long fileHeaderGetRoot(final int root) { final short offset = (short) (FILE_HEADER_O_ROOTS + (root * Magic.SZ_LONG)); return readLong(offset); } /** * Sets the indicated root rowid. * */ void fileHeaderSetRoot(final int root, final long rowid) { final short offset = (short) (FILE_HEADER_O_ROOTS + (root * Magic.SZ_LONG)); writeLong(offset, rowid); } /** * Returns true if the magic corresponds with the fileHeader magic. */ boolean pageHeaderMagicOk() { int magic = pageHeaderGetMagic(); return magic >= Magic.PAGE_MAGIC && magic <= (Magic.PAGE_MAGIC + Magic.FREEPHYSIDS_ROOT_PAGE); } /** * For paranoia mode */ protected void pageHeaderParanoiaMagicOk() { if (!pageHeaderMagicOk()) throw new Error("CRITICAL: page header magic not OK " + pageHeaderGetMagic()); } short pageHeaderGetMagic() { return readShort(PAGE_HEADER_O_MAGIC); } long pageHeaderGetNext() { pageHeaderParanoiaMagicOk(); return readSixByteLong(PAGE_HEADER_O_NEXT); } void pageHeaderSetNext(long next) { pageHeaderParanoiaMagicOk(); writeSixByteLong(PAGE_HEADER_O_NEXT, next); } long pageHeaderGetPrev() { pageHeaderParanoiaMagicOk(); return readSixByteLong(PAGE_HEADER_O_PREV); } void pageHeaderSetPrev(long prev) { pageHeaderParanoiaMagicOk(); writeSixByteLong(PAGE_HEADER_O_PREV, prev); } void pageHeaderSetType(short type) { writeShort(PAGE_HEADER_O_MAGIC, (short) (Magic.PAGE_MAGIC + type)); } long pageHeaderGetLocation(final short pos){ return readSixByteLong(pos + PhysicalRowId_O_LOCATION); } void pageHeaderSetLocation(short pos, long value) { writeSixByteLong(pos + PhysicalRowId_O_LOCATION, value); } short dataPageGetFirst() { return readShort(DATA_PAGE_O_FIRST); } void dataPageSetFirst(short value) { pageHeaderParanoiaMagicOk(); if (value > 0 && value < DATA_PAGE_O_DATA) throw new Error("DataPage.setFirst: offset " + value + " too small"); writeShort(DATA_PAGE_O_FIRST, value); } } ================================================ FILE: src/main/java/org/apache/jdbm/PageManager.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.IOException; import java.nio.ByteBuffer; /** * This class manages the linked lists of pages that make up a file. */ final class PageManager { // our record file final PageFile file; private PageIo headerBuf; /** * Creates a new page manager using the indicated record file. */ PageManager(PageFile file) throws IOException { this.file = file; // check the file headerBuf.fileHeader If the magic is 0, we assume a new // file. Note that we hold on to the file header node. headerBuf = file.get(0); headerBuf.ensureHeapBuffer(); headerBuf.fileHeaderCheckHead(headerBuf.readShort(0) == 0); } /** * Allocates a page of the indicated type. Returns recid of the * page. */ long allocate(short type) throws IOException { if (type == Magic.FREE_PAGE) throw new Error("allocate of free page?"); // do we have something on the free list? long retval = headerBuf.fileHeaderGetFirstOf(Magic.FREE_PAGE); boolean isNew = false; if(type!=Magic.TRANSLATION_PAGE){ if (retval != 0) { // yes. Point to it and make the next of that page the // new first free page. headerBuf.fileHeaderSetFirstOf(Magic.FREE_PAGE, getNext(retval)); } else { // nope. make a new record retval = headerBuf.fileHeaderGetLastOf(Magic.FREE_PAGE); if (retval == 0) // very new file - allocate record #1 retval = 1; headerBuf.fileHeaderSetLastOf(Magic.FREE_PAGE, retval + 1); isNew = true; } }else{ //translation pages have different allocation scheme //and also have negative address retval = headerBuf.fileHeaderGetLastOf(Magic.TRANSLATION_PAGE) - 1; isNew = true; } // Cool. We have a record, add it to the correct list PageIo pageHdr = file.get(retval); if(isNew){ pageHdr.pageHeaderSetType(type); }else{ if (!pageHdr.pageHeaderMagicOk()) throw new Error("CRITICAL: page header magic for page "+ pageHdr.getPageId() + " not OK "+ pageHdr.pageHeaderGetMagic()); } long oldLast = headerBuf.fileHeaderGetLastOf(type); // Clean data. pageHdr.writeByteArray(PageFile.CLEAN_DATA, 0, 0, Storage.PAGE_SIZE); pageHdr.pageHeaderSetType(type); pageHdr.pageHeaderSetPrev(oldLast); pageHdr.pageHeaderSetNext(0); if (oldLast == 0) // This was the first one of this type headerBuf.fileHeaderSetFirstOf(type, retval); headerBuf.fileHeaderSetLastOf(type, retval); file.release(retval, true); // If there's a previous, fix up its pointer if (oldLast != 0) { pageHdr = file.get(oldLast); pageHdr.pageHeaderSetNext(retval); file.release(oldLast, true); } return retval; } /** * Frees a page of the indicated type. */ void free(short type, long recid) throws IOException { if (type == Magic.FREE_PAGE) throw new Error("free free page?"); if (type == Magic.TRANSLATION_PAGE) throw new Error("Translation page can not be dealocated"); if (recid == 0) throw new Error("free header page?"); // get the page and read next and previous pointers PageIo pageHdr = file.get(recid); long prev = pageHdr.pageHeaderGetPrev(); long next = pageHdr.pageHeaderGetNext(); // put the page at the front of the free list. 
pageHdr.pageHeaderSetType(Magic.FREE_PAGE); pageHdr.pageHeaderSetNext(headerBuf.fileHeaderGetFirstOf(Magic.FREE_PAGE)); pageHdr.pageHeaderSetPrev(0); headerBuf.fileHeaderSetFirstOf(Magic.FREE_PAGE, recid); file.release(recid, true); // remove the page from its old list if (prev != 0) { pageHdr = file.get(prev); pageHdr.pageHeaderSetNext(next); file.release(prev, true); } else { headerBuf.fileHeaderSetFirstOf(type, next); } if (next != 0) { pageHdr = file.get(next); pageHdr.pageHeaderSetPrev(prev); file.release(next, true); } else { headerBuf.fileHeaderSetLastOf(type, prev); } } /** * Returns the page following the indicated page */ long getNext(long page) throws IOException { try { return file.get(page).pageHeaderGetNext(); } finally { file.release(page, false); } } /** * Returns the page before the indicated page */ long getPrev(long page) throws IOException { try { return file.get(page).pageHeaderGetPrev(); } finally { file.release(page, false); } } /** * Returns the first page on the indicated list. */ long getFirst(short type) throws IOException { return headerBuf.fileHeaderGetFirstOf(type); } /** * Returns the last page on the indicated list. */ long getLast(short type) throws IOException { return headerBuf.fileHeaderGetLastOf(type); } /** * Commit all pending (in-memory) data by flushing the page manager. * This forces a flush of all outstanding pages (this it's an implicit * {@link PageFile#commit} as well). */ void commit() throws IOException { // write the header out file.release(headerBuf); file.commit(); // and obtain it again headerBuf = file.get(0); headerBuf.ensureHeapBuffer(); headerBuf.fileHeaderCheckHead(headerBuf.readShort(0) == 0); } /** * Flushes the page manager. This forces a flush of all outstanding * pages (this it's an implicit {@link PageFile#commit} as well). */ void rollback() throws IOException { // release header file.discard(headerBuf); file.rollback(); // and obtain it again headerBuf = file.get(0); headerBuf.ensureHeapBuffer(); headerBuf.fileHeaderCheckHead(headerBuf.readShort(0) == 0); } /** * Closes the page manager. This flushes the page manager and releases * the lock on the headerBuf.fileHeader */ void close() throws IOException { file.release(headerBuf); file.commit(); headerBuf = null; } /** * PageManager permanently locks zero page, and we need this for backups */ ByteBuffer getHeaderBufData() { return headerBuf.getData(); } public PageIo getFileHeader() { return headerBuf; } } ================================================ FILE: src/main/java/org/apache/jdbm/PageTransactionManager.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package org.apache.jdbm; import javax.crypto.Cipher; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; import java.util.ArrayList; import java.util.Comparator; import java.util.Iterator; import java.util.TreeSet; /** * This class manages the transaction log that belongs to every * {@link PageFile}. The transaction log is either clean, or * in progress. In the latter case, the transaction manager * takes care of a roll forward. */ // TODO: Handle the case where we are recovering lg9 and lg0, were we // should start with lg9 instead of lg0! final class PageTransactionManager { private PageFile owner; // streams for transaction log. private DataOutputStream oos; /** * In-core copy of transactions. We could read everything back from * the log file, but the PageFile needs to keep the dirty pages in * core anyway, so we might as well point to them and spare us a lot * of hassle. */ private ArrayList txn = new ArrayList(); private int curTxn = -1; private Storage storage; private Cipher cipherIn; private Cipher cipherOut; /** * Instantiates a transaction manager instance. If recovery * needs to be performed, it is done. * * @param owner the PageFile instance that owns this transaction mgr. * @param storage * @param cipherIn * @param cipherOut */ PageTransactionManager(PageFile owner, Storage storage, Cipher cipherIn, Cipher cipherOut) throws IOException { this.owner = owner; this.storage = storage; this.cipherIn = cipherIn; this.cipherOut = cipherOut; recover(); open(); } /** * Synchronize log file data with the main database file. *

* After this call, the main database file is guaranteed to be * consistent and guaranteed to be the only file needed for * backup purposes. */ public void synchronizeLog() throws IOException { synchronizeLogFromMemory(); } /** * Synchs in-core transactions to data file and opens a fresh log */ private void synchronizeLogFromMemory() throws IOException { close(); TreeSet pageList = new TreeSet(PAGE_IO_COMPARTOR); int numPages = 0; int writtenPages = 0; if(txn!=null){ // Add each page to the pageList, replacing the old copy of this // page if necessary, thus avoiding writing the same page twice for (Iterator k = txn.iterator(); k.hasNext(); ) { PageIo page = k.next(); if (pageList.contains(page)) { page.decrementTransactionCount(); } else { writtenPages++; boolean result = pageList.add(page); } numPages++; } txn = null; } // Write the page from the pageList to disk synchronizePages(pageList, true); owner.sync(); open(); } /** * Opens the log file */ private void open() throws IOException { oos = storage.openTransactionLog(); oos.writeShort(Magic.LOGFILE_HEADER); oos.flush(); curTxn = -1; } /** * Startup recovery on all files */ private void recover() throws IOException { DataInputStream ois = storage.readTransactionLog(); // if transaction log is empty, or does not exist if (ois == null) return; while (true) { ArrayList pages = null; try { int size = LongPacker.unpackInt(ois); pages = new ArrayList(size); for (int i = 0; i < size; i++) { PageIo b = new PageIo(); b.readExternal(ois, cipherOut); pages.add(b); } } catch (IOException e) { // corrupted logfile, ignore rest of transactions break; } synchronizePages(pages, false); } owner.sync(); ois.close(); storage.deleteTransactionLog(); } /** * Synchronizes the indicated pages with the owner. */ private void synchronizePages(Iterable pages, boolean fromCore) throws IOException { // write pages vector elements to the data file. for (PageIo cur : pages) { owner.synch(cur); if (fromCore) { cur.decrementTransactionCount(); if (!cur.isInTransaction()) { owner.releaseFromTransaction(cur); } } } } /** * Set clean flag on the pages. */ private void setClean(ArrayList pages) throws IOException { for (PageIo cur : pages) { cur.setClean(); } } /** * Discards the indicated pages and notify the owner. */ private void discardPages(ArrayList pages) throws IOException { for (PageIo cur : pages) { cur.decrementTransactionCount(); if (!cur.isInTransaction()) { owner.releaseFromTransaction(cur); } } } /** * Starts a transaction. This can pages if all slots have been filled * with full transactions, waiting for the synchronization thread to * clean out slots. */ void start() throws IOException { curTxn++; if (curTxn == 1) { synchronizeLogFromMemory(); curTxn = 0; } txn = new ArrayList(); } /** * Indicates the page is part of the transaction. */ void add(PageIo page) throws IOException { page.incrementTransactionCount(); txn.add(page); } /** * Commits the transaction to the log file. */ void commit() throws IOException { LongPacker.packInt(oos, txn.size()); for (PageIo page : txn) { page.writeExternal(oos, cipherIn); } sync(); // set clean flag to indicate pages have been written to log setClean(txn); // open a new ObjectOutputStream in order to store // newer states of PageIo // oos = new DataOutputStream(new BufferedOutputStream(fos)); } /** * Flushes and syncs */ private void sync() throws IOException { oos.flush(); } /** * Shutdowns the transaction manager. Resynchronizes outstanding * logs. 
*/ void shutdown() throws IOException { synchronizeLogFromMemory(); close(); } /** * Closes open files. */ private void close() throws IOException { sync(); oos.close(); oos = null; } /** * Force closing the file without synchronizing pending transaction data. * Used for testing purposes only. */ void forceClose() throws IOException { oos.close(); oos = null; } /** * Use the disk-based transaction log to synchronize the data file. * Outstanding memory logs are discarded because they are believed * to be inconsistent. */ void synchronizeLogFromDisk() throws IOException { close(); if (txn != null){ discardPages(txn); txn = null; } recover(); open(); } /** * INNER CLASS. * Comparator class for use by the tree set used to store the pages * to write for this transaction. The PageIo objects are ordered by * their page ids. */ private static final Comparator PAGE_IO_COMPARTOR = new Comparator() { public int compare(PageIo page1, PageIo page2) { if (page1.getPageId() == page2.getPageId()) { return 0; } else if (page1.getPageId() < page2.getPageId()) { return -1; } else { return 1; } } }; } ================================================ FILE: src/main/java/org/apache/jdbm/PhysicalFreeRowIdManager.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.IOException; import java.util.Arrays; /** * This class manages free physical rowid pages and provides methods to free and allocate physical rowids on a high * level. */ final class PhysicalFreeRowIdManager { /** maximal record size which can be hold. If record crosses multiple pages, it is trimmed before added to free list */ static final int MAX_REC_SIZE = Storage.PAGE_SIZE *2; /** where data on root page starts, there are no extra data in page header */ static final int ROOT_HEADER_SIZE = Magic.PAGE_HEADER_SIZE; /** page header size for slot page */ static final int SLOT_PAGE_HEADER_SIZE = Magic.PAGE_HEADER_SIZE + Magic.SZ_SHORT + Magic.SZ_SIX_BYTE_LONG; /** number of recids on slot page */ static final int OFFSET_SLOT_PAGE_REC_COUNT = Magic.PAGE_HEADER_SIZE; static final int SLOT_PAGE_REC_NUM = (Storage.PAGE_SIZE - SLOT_PAGE_HEADER_SIZE)/6; /** pointer to next slo page in slot page header */ static final int OFFSET_SLOT_PAGE_NEXT = Magic.PAGE_HEADER_SIZE + Magic.SZ_SHORT; /** number of size slots held in root page */ static final int MAX_RECIDS_PER_PAGE = (Storage.PAGE_SIZE -ROOT_HEADER_SIZE-6) / 6; //6 is size of page pointer /** free records are grouped into slots by record size. Here is max diff in record size per group */ static final int ROOT_SLOT_SIZE = 1+MAX_REC_SIZE/ MAX_RECIDS_PER_PAGE; protected final PageFile file; protected final PageManager pageman; /** list of free phys slots in current transaction. 
First two bytes are size, last 6 bytes are recid*/ private long[] inTrans = new long[8]; private int inTransSize = 0; /** * Creates a new instance using the indicated record file and page manager. */ PhysicalFreeRowIdManager(PageFile file, PageManager pageman) throws IOException { this.file = file; this.pageman = pageman; } long getFreeRecord(final int size) throws IOException { if(size >= MAX_REC_SIZE) return 0; final PageIo root = getRootPage(); final int rootPageOffset = sizeToRootOffset(size+ ROOT_SLOT_SIZE); final long slotPageId = root.readSixByteLong(rootPageOffset); if(slotPageId==0){ file.release(root); return 0; } PageIo slotPage = file.get(slotPageId); if(slotPage.readShort(Magic.PAGE_HEADER_O_MAGIC) != Magic.PAGE_MAGIC + Magic.FREEPHYSIDS_PAGE) throw new InternalError(); short recidCount = slotPage.readShort(OFFSET_SLOT_PAGE_REC_COUNT); if(recidCount<=0){ throw new InternalError(); } final int offset = (recidCount-1) * 6 + SLOT_PAGE_HEADER_SIZE; final long recid = slotPage.readSixByteLong(offset); recidCount --; if(recidCount>0){ //decrease counter and zero out old record slotPage.writeSixByteLong(offset,0); slotPage.writeShort(OFFSET_SLOT_PAGE_REC_COUNT, recidCount); file.release(root); file.release(slotPage); }else{ //release this page long prevSlotPageId = slotPage.readSixByteLong(OFFSET_SLOT_PAGE_NEXT); root.writeSixByteLong(rootPageOffset,prevSlotPageId); file.release(root); file.release(slotPage); pageman.free(Magic.FREEPHYSIDS_PAGE,slotPageId); } return recid; } static final int sizeToRootOffset(int size) { return ROOT_HEADER_SIZE + 6 * (size/ROOT_SLOT_SIZE); } /** * Puts the indicated rowid on the free list, which awaits for commit */ void putFreeRecord(final long rowid, final int size) throws IOException { //ensure capacity if(inTransSize==inTrans.length){ inTrans = Arrays.copyOf(inTrans, inTrans.length * 2); } inTrans[inTransSize] = rowid + (((long)size)<<48); inTransSize++; } public void commit() throws IOException { if(inTransSize==0) return; Arrays.sort(inTrans,0,inTransSize-1); //write all uncommited free records final PageIo root = getRootPage(); PageIo slotPage = null; for(int rowIdPos = 0; rowIdPos>>48); final long rowid = inTrans[rowIdPos] & 0x0000FFFFFFFFFFFFL; final int rootPageOffset = sizeToRootOffset(size); long slotPageId = root.readSixByteLong(rootPageOffset); if(slotPageId == 0){ if(slotPage!=null) file.release(slotPage); //create new page for this slot slotPageId = pageman.allocate(Magic.FREEPHYSIDS_PAGE); root.writeSixByteLong(rootPageOffset,slotPageId); } if(slotPage == null || slotPage.getPageId()!=slotPageId){ if(slotPage!=null) file.release(slotPage); slotPage = file.get(slotPageId); } if(slotPage.readShort(Magic.PAGE_HEADER_O_MAGIC) != Magic.PAGE_MAGIC + Magic.FREEPHYSIDS_PAGE) throw new InternalError(); short recidCount = slotPage.readShort(OFFSET_SLOT_PAGE_REC_COUNT); if(recidCount== MAX_RECIDS_PER_PAGE){ file.release(slotPage); //allocate new slot page and update links final long newSlotPageId = pageman.allocate(Magic.FREEPHYSIDS_PAGE); slotPage = file.get(newSlotPageId); slotPage.writeSixByteLong(OFFSET_SLOT_PAGE_NEXT,slotPageId); slotPage.writeShort(OFFSET_SLOT_PAGE_REC_COUNT,(short)0); recidCount = 0; slotPageId = newSlotPageId; root.writeSixByteLong(rootPageOffset,newSlotPageId); } //write new recid slotPage.writeSixByteLong(recidCount * 6 + SLOT_PAGE_HEADER_SIZE,rowid); //and increase count recidCount++; slotPage.writeShort(OFFSET_SLOT_PAGE_REC_COUNT,recidCount); } if(slotPage!=null) file.release(slotPage); file.release(root); 
clearFreeInTrans(); } public void rollback() { clearFreeInTrans(); } private void clearFreeInTrans() { if(inTrans.length>128) inTrans = new long[8]; inTransSize = 0; } /** return free phys row page. If not found create it */ final PageIo getRootPage() throws IOException { long pageId = pageman.getFirst(Magic.FREEPHYSIDS_ROOT_PAGE); if(pageId == 0){ pageId = pageman.allocate(Magic.FREEPHYSIDS_ROOT_PAGE); } return file.get(pageId); } } ================================================ FILE: src/main/java/org/apache/jdbm/PhysicalRowIdManager.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.IOException; import static org.apache.jdbm.Storage.*; /** * This class manages physical row ids, and their data. */ final class PhysicalRowIdManager { // The file we're talking to and the associated page manager. final private PageFile file; final private PageManager pageman; final PhysicalFreeRowIdManager freeman; static final private short DATA_PER_PAGE = (short) (PAGE_SIZE - Magic.DATA_PAGE_O_DATA); //caches offset after last allocation. So we dont have to iterate throw page every allocation private long cachedLastAllocatedRecordPage = Long.MIN_VALUE; private short cachedLastAllocatedRecordOffset = Short.MIN_VALUE; /** * Creates a new rowid manager using the indicated record file. and page manager. */ PhysicalRowIdManager(PageFile file, PageManager pageManager) throws IOException { this.file = file; this.pageman = pageManager; this.freeman = new PhysicalFreeRowIdManager(file, pageManager); } /** * Inserts a new record. Returns the new physical rowid. */ long insert(final byte[] data, final int start, final int length) throws IOException { if (length < 1) throw new IllegalArgumentException("Length is <1"); if (start < 0) throw new IllegalArgumentException("negative start"); long retval = alloc(length); write(retval, data, start, length); return retval; } /** * Updates an existing record. Returns the possibly changed physical rowid. */ long update(long rowid, final byte[] data, final int start, final int length) throws IOException { // fetch the record header PageIo page = file.get(rowid>>> Storage.PAGE_SIZE_SHIFT); short head = (short) (rowid & Storage.OFFSET_MASK); int availSize = RecordHeader.getAvailableSize(page, head); if (length > availSize || //difference between free and available space can be only 254. //if bigger, need to realocate and free page availSize - length > RecordHeader.MAX_SIZE_SPACE ) { // not enough space - we need to copy to a new rowid. file.release(page); free(rowid); rowid = alloc(length); } else { file.release(page); } // 'nuff space, write it in and return the rowid. 
write(rowid, data, start, length); return rowid; } void fetch(final DataInputOutput out, final long rowid) throws IOException { // fetch the record header long current = rowid >>> Storage.PAGE_SIZE_SHIFT; PageIo page = file.get(current); final short head = (short) (rowid & Storage.OFFSET_MASK); // allocate a return buffer // byte[] retval = new byte[ head.getCurrentSize() ]; final int size = RecordHeader.getCurrentSize(page, head); if (size == 0) { file.release(current, false); return; } // copy bytes in int leftToRead = size; short dataOffset = (short) ( head + RecordHeader.SIZE); while (leftToRead > 0) { // copy current page's data to return buffer int toCopy = PAGE_SIZE - dataOffset; if (leftToRead < toCopy) { toCopy = leftToRead; } out.writeFromByteBuffer(page.getData(), dataOffset, toCopy); // Go to the next page leftToRead -= toCopy; // out.flush(); file.release(page); if (leftToRead > 0) { current = pageman.getNext(current); page = file.get(current); dataOffset = Magic.DATA_PAGE_O_DATA; } } // return retval; } /** * Allocate a new rowid with the indicated size. */ private long alloc(int size) throws IOException { size = RecordHeader.roundAvailableSize(size); long retval = freeman.getFreeRecord(size); if (retval == 0) { retval = allocNew(size, pageman.getLast(Magic.USED_PAGE)); } return retval; } /** * Allocates a new rowid. The second parameter is there to allow for a recursive call - it indicates where the * search should start. */ private long allocNew(int size, long start) throws IOException { PageIo curPage; if (start == 0 || //last page was completely filled? cachedLastAllocatedRecordPage == start && cachedLastAllocatedRecordOffset == PAGE_SIZE ) { // we need to create a new page. start = pageman.allocate(Magic.USED_PAGE); curPage = file.get(start); curPage.dataPageSetFirst(Magic.DATA_PAGE_O_DATA); cachedLastAllocatedRecordOffset = Magic.DATA_PAGE_O_DATA; cachedLastAllocatedRecordPage = curPage.getPageId(); RecordHeader.setAvailableSize(curPage, Magic.DATA_PAGE_O_DATA, 0); RecordHeader.setCurrentSize(curPage, Magic.DATA_PAGE_O_DATA, 0); } else { curPage = file.get(start); } // follow the rowids on this page to get to the last one. We don't // fall off, because this is the last page, remember? short pos = curPage.dataPageGetFirst(); if (pos == 0) { // page is exactly filled by the last page of a record file.release(curPage); return allocNew(size, 0); } short hdr = pos; if (cachedLastAllocatedRecordPage != curPage.getPageId() ) { //position was not cached, have to find it again int availSize = RecordHeader.getAvailableSize(curPage, hdr); while (availSize != 0 && pos < PAGE_SIZE) { pos += availSize + RecordHeader.SIZE; if (pos == PAGE_SIZE) { // Again, a filled page. file.release(curPage); return allocNew(size, 0); } hdr = pos; availSize = RecordHeader.getAvailableSize(curPage, hdr); } } else { hdr = cachedLastAllocatedRecordOffset; pos = cachedLastAllocatedRecordOffset; } if (pos == RecordHeader.SIZE) { //TODO why is this here? // the last record exactly filled the page. Restart forcing // a new page. file.release(curPage); } if(hdr>Storage.PAGE_SIZE - 16){ file.release(curPage); //there is not enought space on current page, so force new page return allocNew(size,0); } // we have the position, now tack on extra pages until we've got // enough space. long retval =(start << Storage.PAGE_SIZE_SHIFT) + (long) pos; int freeHere = PAGE_SIZE - pos - RecordHeader.SIZE; if (freeHere < size) { // check whether the last page would have only a small bit left. 
// if yes, increase the allocation. A small bit is a record // header plus 16 bytes. int lastSize = (size - freeHere) % DATA_PER_PAGE; if (size = DATA_PER_PAGE) { start = pageman.allocate(Magic.USED_PAGE); curPage = file.get(start); curPage.dataPageSetFirst((short) 0); // no rowids, just data file.release(start, true); neededLeft -= DATA_PER_PAGE; } if (neededLeft > 0) { // done with whole chunks, allocate last fragment. start = pageman.allocate(Magic.USED_PAGE); curPage = file.get(start); curPage.dataPageSetFirst((short) (Magic.DATA_PAGE_O_DATA + neededLeft)); file.release(start, true); cachedLastAllocatedRecordOffset = (short) (Magic.DATA_PAGE_O_DATA + neededLeft); cachedLastAllocatedRecordPage = curPage.getPageId(); } } else { // just update the current page. If there's less than 16 bytes // left, we increase the allocation (16 bytes is an arbitrary // number). if (freeHere - size <= (16 + RecordHeader.SIZE)) { size = freeHere; } RecordHeader.setAvailableSize(curPage, hdr, size); file.release(start, true); cachedLastAllocatedRecordOffset = (short) (hdr + RecordHeader.SIZE + size); cachedLastAllocatedRecordPage = curPage.getPageId(); } return retval; } void free(final long id) throws IOException { // get the rowid, and write a zero current size into it. final long curPageId = id >>> Storage.PAGE_SIZE_SHIFT; final PageIo curPage = file.get(curPageId); final short offset = (short) (id & Storage.OFFSET_MASK); RecordHeader.setCurrentSize(curPage, offset, 0); int size = RecordHeader.getAvailableSize(curPage, offset); //trim size if spreads across multiple pages if(offset + RecordHeader.SIZE + size >PAGE_SIZE + (PAGE_SIZE-Magic.DATA_PAGE_O_DATA)){ int numOfPagesToSkip = (size - (Storage.PAGE_SIZE-(offset - RecordHeader.SIZE)) //minus data remaining on this page )/(PAGE_SIZE-Magic.DATA_PAGE_O_DATA); size = size - numOfPagesToSkip * (PAGE_SIZE-Magic.DATA_PAGE_O_DATA); RecordHeader.setAvailableSize(curPage, offset,size); //get next page long nextPage = curPage.pageHeaderGetNext(); file.release(curPage); //release pages for(int i = 0;i>> Storage.PAGE_SIZE_SHIFT; PageIo page = file.get(current); final short hdr = (short) (rowid & Storage.OFFSET_MASK); RecordHeader.setCurrentSize(page, hdr, length); if (length == 0) { file.release(current, true); return; } // copy bytes in int offsetInBuffer = start; int leftToWrite = length; short dataOffset = (short) (hdr + RecordHeader.SIZE); while (leftToWrite > 0) { // copy current page's data to return buffer int toCopy = PAGE_SIZE - dataOffset; if (leftToWrite < toCopy) { toCopy = leftToWrite; } page.writeByteArray(data, offsetInBuffer, dataOffset, toCopy); // Go to the next page leftToWrite -= toCopy; offsetInBuffer += toCopy; file.release(current, true); if (leftToWrite > 0) { current = pageman.getNext(current); page = file.get(current); dataOffset = Magic.DATA_PAGE_O_DATA; } } } void rollback() throws IOException { cachedLastAllocatedRecordPage = Long.MIN_VALUE; cachedLastAllocatedRecordOffset = Short.MIN_VALUE; freeman.rollback(); } void commit() throws IOException { freeman.commit(); } } ================================================ FILE: src/main/java/org/apache/jdbm/RecordHeader.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; /** * The data that comes at the start of a record of data. It stores * both the current size and the available size for the record - the latter * can be bigger than the former, which allows the record to grow without * needing to be moved and which allows the system to put small records * in larger free spots. *

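* For illustration (numbers derived from convertAvailSize/deconvertAvailSize below, with
* MAX_SIZE_SPACE = 254): an available size that fits in a signed short, e.g. 20000, is stored
* as-is; a larger value such as 40000 is stored as -(1 + (40000 - 32767) / 254) = -29 and reads
* back as 32767 + 29 * 254 = 40133, so large sizes are rounded up to a 254-byte granularity.
* The current size is then written as a one-byte difference from the available size (the byte
* value 255 is reserved to mean a current size of zero).
*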
* In JDBM 1.0 both values were stored as four-byte integers. This was very wastefull. * Now available size is stored in two bytes, it is compressed, so maximal value is up to 120 MB (not sure with exact number) * Current size is stored as two-byte-unsigned-short difference from Available Size. */ final class RecordHeader { // offsets private static final short O_CURRENTSIZE = 0; // int currentSize private static final short O_AVAILABLESIZE = Magic.SZ_BYTE; // int availableSize static final int MAX_RECORD_SIZE = 8355839; static final int SIZE = O_AVAILABLESIZE + Magic.SZ_SHORT; /** * Maximal difference between current and available size, * Maximal value is reserved for currentSize 0, so use -1 */ static final int MAX_SIZE_SPACE = 255 - 1; /** * Returns the current size */ static int getCurrentSize(final PageIo page, final short pos) { int s = page.readByte(pos + O_CURRENTSIZE) & 0xFF; if (s == MAX_SIZE_SPACE + 1) return 0; return getAvailableSize(page, pos) - s; } /** * Sets the current size */ static void setCurrentSize(final PageIo page, final short pos, int value) { if (value == 0) { page.writeByte(pos + O_CURRENTSIZE, (byte) (MAX_SIZE_SPACE + 1)); return; } int availSize = getAvailableSize(page, pos); if (value < (availSize - MAX_SIZE_SPACE) || value > availSize) throw new IllegalArgumentException("currentSize out of bounds, need to realocate " + value + " - " + availSize); page.writeByte(pos + O_CURRENTSIZE, (byte) (availSize - value)); } /** * Returns the available size */ static int getAvailableSize(final PageIo page, final short pos) { return deconvertAvailSize(page.readShort(pos + O_AVAILABLESIZE)); } /** * Sets the available size */ static void setAvailableSize(final PageIo page, final short pos, int value) { if (value != roundAvailableSize(value)) throw new IllegalArgumentException("value is not rounded"); int oldCurrSize = getCurrentSize(page, pos); page.writeShort(pos + O_AVAILABLESIZE, convertAvailSize(value)); setCurrentSize(page, pos, oldCurrSize); } static short convertAvailSize(final int recordSize) { if (recordSize <= Short.MAX_VALUE) return (short) recordSize; else { int shift = recordSize - Short.MAX_VALUE; if (shift % MAX_SIZE_SPACE == 0) shift = shift / MAX_SIZE_SPACE; else shift = 1 + shift / MAX_SIZE_SPACE; shift = -shift; return (short) (shift); } } static int deconvertAvailSize(final short converted) { if (converted >= 0) return converted; else { int shifted = -converted; shifted = shifted * MAX_SIZE_SPACE; return Short.MAX_VALUE + shifted; } } static int roundAvailableSize(int value) { if (value > MAX_RECORD_SIZE) new InternalError("Maximal record size (" + MAX_RECORD_SIZE + ") exceeded: " + value); return deconvertAvailSize(convertAvailSize(value)); } } ================================================ FILE: src/main/java/org/apache/jdbm/RecordListener.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.IOException; /** * A listener notified when a record is inserted, updated or removed. *

* NOTE: this class was used in JDBM2 to support secondary indexes * JDBM3 does not have a secondary indexes, so this class is not publicly exposed. * * @param key type * @param value type * @author Jan Kotek */ interface RecordListener { void recordInserted(K key, V value) throws IOException; void recordUpdated(K key, V oldValue, V newValue) throws IOException; void recordRemoved(K key, V value) throws IOException; } ================================================ FILE: src/main/java/org/apache/jdbm/SerialClassInfo.java ================================================ package org.apache.jdbm; import org.apache.jdbm.Serialization.FastArrayList; import java.io.*; import java.lang.reflect.Constructor; import java.lang.reflect.Field; import java.lang.reflect.Method; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; /** * This class stores information about serialized classes and fields. */ abstract class SerialClassInfo { static final Serializer> serializer = new Serializer>() { public void serialize(DataOutput out, ArrayList obj) throws IOException { LongPacker.packInt(out, obj.size()); for (ClassInfo ci : obj) { out.writeUTF(ci.getName()); out.writeBoolean(ci.isEnum); out.writeBoolean(ci.isExternalizable); if(ci.isExternalizable) continue; //no fields LongPacker.packInt(out, ci.fields.size()); for (FieldInfo fi : ci.fields) { out.writeUTF(fi.getName()); out.writeBoolean(fi.isPrimitive()); out.writeUTF(fi.getType()); } } } public ArrayList deserialize(DataInput in) throws IOException, ClassNotFoundException { int size = LongPacker.unpackInt(in); ArrayList ret = new ArrayList(size); for (int i = 0; i < size; i++) { String className = in.readUTF(); boolean isEnum = in.readBoolean(); boolean isExternalizable = in.readBoolean(); int fieldsNum = isExternalizable? 0 : LongPacker.unpackInt(in); FieldInfo[] fields = new FieldInfo[fieldsNum]; for (int j = 0; j < fieldsNum; j++) { fields[j] = new FieldInfo(in.readUTF(), in.readBoolean(), in.readUTF(), Class.forName(className)); } ret.add(new ClassInfo(className, fields,isEnum,isExternalizable)); } return ret; } }; long serialClassInfoRecid; public SerialClassInfo(DBAbstract db, long serialClassInfoRecid, ArrayList registered){ this.db = db; this.serialClassInfoRecid = serialClassInfoRecid; this.registered = registered; } /** * Stores info about single class stored in JDBM. 
* Roughly corresponds to 'java.io.ObjectStreamClass' */ static class ClassInfo { private final String name; private final List fields = new ArrayList(); private final Map name2fieldInfo = new HashMap(); private final Map name2fieldId = new HashMap(); private ObjectStreamField[] objectStreamFields; final boolean isEnum; final boolean isExternalizable; ClassInfo(final String name, final FieldInfo[] fields, final boolean isEnum, final boolean isExternalizable) { this.name = name; this.isEnum = isEnum; this.isExternalizable = isExternalizable; for (FieldInfo f : fields) { this.name2fieldId.put(f.getName(), this.fields.size()); this.fields.add(f); this.name2fieldInfo.put(f.getName(), f); } } public String getName() { return name; } public FieldInfo[] getFields() { return (FieldInfo[]) fields.toArray(); } public FieldInfo getField(String name) { return name2fieldInfo.get(name); } public int getFieldId(String name) { Integer fieldId = name2fieldId.get(name); if(fieldId != null) return fieldId; return -1; } public FieldInfo getField(int serialId) { return fields.get(serialId); } public int addFieldInfo(FieldInfo field) { name2fieldId.put(field.getName(), fields.size()); name2fieldInfo.put(field.getName(), field); fields.add(field); return fields.size() - 1; } public ObjectStreamField[] getObjectStreamFields() { return objectStreamFields; } public void setObjectStreamFields(ObjectStreamField[] objectStreamFields) { this.objectStreamFields = objectStreamFields; } } /** * Stores info about single field stored in JDBM. * Roughly corresponds to 'java.io.ObjectFieldClass' */ static class FieldInfo { private final String name; private final boolean primitive; private final String type; private Class typeClass; // Class containing this field private final Class clazz; private Object setter; private int setterIndex; private Object getter; private int getterIndex; public FieldInfo(String name, boolean primitive, String type, Class clazz) { this.name = name; this.primitive = primitive; this.type = type; this.clazz = clazz; try { this.typeClass = Class.forName(type); } catch (ClassNotFoundException e) { this.typeClass = null; } initSetter(); initGetter(); } private void initSetter() { // Set setter String setterName = "set" + firstCharCap(name); String fieldSetterName = clazz.getName() + "#" + setterName; Class aClazz = clazz; // iterate over class hierarchy, until root class while (aClazz != Object.class) { // check if there is getMethod try { Method m = aClazz.getMethod(setterName, typeClass); if (m != null) { setter = m; return; } } catch (Exception e) { // e.printStackTrace(); } // no get method, access field directly try { Field f = aClazz.getDeclaredField(name); // security manager may not be happy about this if (!f.isAccessible()) f.setAccessible(true); setter = f; return; } catch (Exception e) { // e.printStackTrace(); } // move to superclass aClazz = aClazz.getSuperclass(); } } private void initGetter() { // Set setter String getterName = "get" + firstCharCap(name); String fieldSetterName = clazz.getName() + "#" + getterName; Class aClazz = clazz; // iterate over class hierarchy, until root class while (aClazz != Object.class) { // check if there is getMethod try { Method m = aClazz.getMethod(getterName); if (m != null) { getter = m; return; } } catch (Exception e) { // e.printStackTrace(); } // no get method, access field directly try { Field f = aClazz.getDeclaredField(name); // security manager may not be happy about this if (!f.isAccessible()) f.setAccessible(true); getter = f; return; } catch 
(Exception e) { // e.printStackTrace(); } // move to superclass aClazz = aClazz.getSuperclass(); } } public FieldInfo(ObjectStreamField sf, Class clazz) { this(sf.getName(), sf.isPrimitive(), sf.getType().getName(), clazz); } public String getName() { return name; } public boolean isPrimitive() { return primitive; } public String getType() { return type; } private String firstCharCap(String s) { return Character.toUpperCase(s.charAt(0)) + s.substring(1); } } ArrayList registered; Map class2classId = new HashMap(); Map classId2class = new HashMap(); final DBAbstract db; public void registerClass(Class clazz) throws IOException { if(clazz != Object.class) assertClassSerializable(clazz); if (containsClass(clazz)) return; ObjectStreamField[] streamFields = getFields(clazz); FieldInfo[] fields = new FieldInfo[streamFields.length]; for (int i = 0; i < fields.length; i++) { ObjectStreamField sf = streamFields[i]; fields[i] = new FieldInfo(sf, clazz); } ClassInfo i = new ClassInfo(clazz.getName(), fields,clazz.isEnum(), Externalizable.class.isAssignableFrom(clazz)); class2classId.put(clazz, registered.size()); classId2class.put(registered.size(), clazz); registered.add(i); if (db != null) db.update(serialClassInfoRecid, (Serialization) this, db.defaultSerializationSerializer); } private ObjectStreamField[] getFields(Class clazz) { ObjectStreamField[] fields = null; ClassInfo classInfo = null; Integer classId = class2classId.get(clazz); if (classId != null) { classInfo = registered.get(classId); fields = classInfo.getObjectStreamFields(); } if (fields == null) { ObjectStreamClass streamClass = ObjectStreamClass.lookup(clazz); FastArrayList fieldsList = new FastArrayList(); while (streamClass != null) { for (ObjectStreamField f : streamClass.getFields()) { fieldsList.add(f); } clazz = clazz.getSuperclass(); streamClass = ObjectStreamClass.lookup(clazz); } fields = new ObjectStreamField[fieldsList .size()]; for (int i = 0; i < fields.length; i++) { fields[i] = fieldsList.get(i); } if(classInfo != null) classInfo.setObjectStreamFields(fields); } return fields; } private void assertClassSerializable(Class clazz) throws NotSerializableException, InvalidClassException { if(containsClass(clazz)) return; if (!Serializable.class.isAssignableFrom(clazz)) throw new NotSerializableException(clazz.getName()); } public Object getFieldValue(String fieldName, Object object) { try { registerClass(object.getClass()); } catch (IOException e) { e.printStackTrace(); } ClassInfo classInfo = registered.get(class2classId.get(object.getClass())); return getFieldValue(classInfo.getField(fieldName), object); } public Object getFieldValue(FieldInfo fieldInfo, Object object) { Object fieldAccessor = fieldInfo.getter; try { if (fieldAccessor instanceof Method) { Method m = (Method) fieldAccessor; return m.invoke(object); } else { Field f = (Field) fieldAccessor; return f.get(object); } } catch (Exception e) { } throw new NoSuchFieldError(object.getClass() + "." 
+ fieldInfo.getName()); } public void setFieldValue(String fieldName, Object object, Object value) { try { registerClass(object.getClass()); } catch (IOException e) { e.printStackTrace(); } ClassInfo classInfo = registered.get(class2classId.get(object.getClass())); setFieldValue(classInfo.getField(fieldName), object, value); } public void setFieldValue(FieldInfo fieldInfo, Object object, Object value) { Object fieldAccessor = fieldInfo.setter; try { if (fieldAccessor instanceof Method) { Method m = (Method) fieldAccessor; m.invoke(object, value); } else { Field f = (Field) fieldAccessor; f.set(object, value); } return; } catch (Throwable e) { e.printStackTrace(); } throw new NoSuchFieldError(object.getClass() + "." + fieldInfo.getName()); } public boolean containsClass(Class clazz) { return (class2classId.get(clazz) != null); } public int getClassId(Class clazz) { Integer classId = class2classId.get(clazz); if(classId != null) { return classId; } throw new Error("Class is not registered: " + clazz); } public void writeObject(DataOutput out, Object obj, FastArrayList objectStack) throws IOException { registerClass(obj.getClass()); //write class header int classId = getClassId(obj.getClass()); LongPacker.packInt(out, classId); ClassInfo classInfo = registered.get(classId); if(classInfo.isExternalizable){ Externalizable o = (Externalizable) obj; DataInputOutput out2 = (DataInputOutput) out; try{ out2.serializer = this; out2.objectStack = objectStack; o.writeExternal(out2); }finally { out2.serializer = null; out2.objectStack = null; } return; } if(classInfo.isEnum) { int ordinal = ((Enum)obj).ordinal(); LongPacker.packInt(out, ordinal); } ObjectStreamField[] fields = getFields(obj.getClass()); LongPacker.packInt(out, fields.length); for (ObjectStreamField f : fields) { //write field ID int fieldId = classInfo.getFieldId(f.getName()); if (fieldId == -1) { //field does not exists in class definition stored in db, //propably new field was added so add field descriptor fieldId = classInfo.addFieldInfo(new FieldInfo(f, obj.getClass())); db.update(serialClassInfoRecid, (Serialization) this, db.defaultSerializationSerializer); } LongPacker.packInt(out, fieldId); //and write value Object fieldValue = getFieldValue(classInfo.getField(fieldId), obj); serialize(out, fieldValue, objectStack); } } public Object readObject(DataInput in, FastArrayList objectStack) throws IOException { //read class header try { int classId = LongPacker.unpackInt(in); ClassInfo classInfo = registered.get(classId); // Class clazz = Class.forName(classInfo.getName()); Class clazz = classId2class.get(classId); if(clazz == null) clazz = Class.forName(classInfo.getName()); assertClassSerializable(clazz); Object o; if(classInfo.isEnum) { int ordinal = LongPacker.unpackInt(in); o = clazz.getEnumConstants()[ordinal]; } else { o = createInstance(clazz, Object.class); } objectStack.add(o); if(classInfo.isExternalizable){ Externalizable oo = (Externalizable) o; DataInputOutput in2 = (DataInputOutput) in; try{ in2.serializer = this; in2.objectStack = objectStack; oo.readExternal(in2); }finally { in2.serializer = null; in2.objectStack = null; } }else{ int fieldCount = LongPacker.unpackInt(in); for (int i = 0; i < fieldCount; i++) { int fieldId = LongPacker.unpackInt(in); FieldInfo f = classInfo.getField(fieldId); Object fieldValue = deserialize(in, objectStack); setFieldValue(f, o, fieldValue); } } return o; } catch (Exception e) { throw new Error("Could not instanciate class", e); } } //TODO dependecy on nonpublic JVM API static private 
sun.reflect.ReflectionFactory rf = sun.reflect.ReflectionFactory.getReflectionFactory(); private static Map class2constuctor = new HashMap(); /** * Little trick to create new instance without using constructor. * Taken from http://www.javaspecialists.eu/archive/Issue175.html */ private static T createInstance(Class clazz, Class parent) { try { Constructor intConstr = class2constuctor.get(clazz); if (intConstr == null) { Constructor objDef = parent.getDeclaredConstructor(); intConstr = rf.newConstructorForSerialization( clazz, objDef); class2constuctor.put(clazz, intConstr); } return clazz.cast(intConstr.newInstance()); } catch (RuntimeException e) { throw e; } catch (Exception e) { throw new IllegalStateException("Cannot create object", e); } } protected abstract Object deserialize(DataInput in, FastArrayList objectStack) throws IOException, ClassNotFoundException; protected abstract void serialize(DataOutput out, Object fieldValue, FastArrayList objectStack) throws IOException; // } ================================================ FILE: src/main/java/org/apache/jdbm/Serialization.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.*; import java.lang.reflect.Array; import java.math.BigDecimal; import java.math.BigInteger; import java.util.*; import static org.apache.jdbm.SerializationHeader.*; /** * Serialization util. It reduces serialized data size for most common java types. *

* The common pattern is a one-byte header which identifies the data type, followed by the size (if required) and * the data. *

* For unknown types, normal Java serialization is used. *

*

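* For example (following the write* methods below): Integer.valueOf(5) is written as the single
* header byte INTEGER_5; other small positive ints become INTEGER_255 plus one unsigned byte;
* a non-empty String is written as the STRING header, a packed length and then each character
* packed with LongPacker; any type without a dedicated case falls back to the NORMAL header
* followed by the reflective field data produced by SerialClassInfo.writeObject.
*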
* Header byte values bellow 180 are reserved by author for future use. If you want to customize * this class, use values over 180, to be compatible with future updates. * * @author Jan Kotek */ @SuppressWarnings("unchecked") public class Serialization extends SerialClassInfo implements Serializer { /** * print statistics to STDOUT */ static final boolean DEBUG = false; static final String UTF8 = "UTF-8"; Serialization(DBAbstract db, long serialClassInfoRecid, ArrayList info) throws IOException { super(db, serialClassInfoRecid,info); } public Serialization() { super(null,0L,new ArrayList()); // Add java.lang.Object as registered class registered.add(new ClassInfo(Object.class.getName(), new FieldInfo[]{},false,false)); } /** * Serialize the object into a byte array. */ public byte[] serialize(Object obj) throws IOException { DataInputOutput ba = new DataInputOutput(); serialize(ba, obj); return ba.toByteArray(); } boolean isSerializable(Object obj) { //TODO suboptimal code try { serialize(new DataOutputStream(new ByteArrayOutputStream()), obj); return true; } catch (Exception e) { return false; } } public void serialize(final DataOutput out, final Object obj) throws IOException { serialize(out, obj, null); } public void serialize(final DataOutput out, final Object obj, FastArrayList objectStack) throws IOException { /**try to find object on stack if it exists*/ if (objectStack != null) { int indexInObjectStack = objectStack.identityIndexOf(obj); if (indexInObjectStack != -1) { //object was already serialized, just write reference to it and return out.write(OBJECT_STACK); LongPacker.packInt(out, indexInObjectStack); return; } //add this object to objectStack objectStack.add(obj); } final Class clazz = obj != null ? obj.getClass() : null; /** first try to serialize object without initializing object stack*/ if (obj == null) { out.write(NULL); return; } else if (clazz == Boolean.class) { if (((Boolean) obj).booleanValue()) out.write(BOOLEAN_TRUE); else out.write(BOOLEAN_FALSE); return; } else if (clazz == Integer.class) { final int val = (Integer) obj; writeInteger(out, val); return; } else if (clazz == Double.class) { double v = (Double) obj; if (v == -1d) out.write(DOUBLE_MINUS_1); else if (v == 0d) out.write(DOUBLE_0); else if (v == 1d) out.write(DOUBLE_1); else if (v >= 0 && v <= 255 && (int) v == v) { out.write(DOUBLE_255); out.write((int) v); } else if (v >= Short.MIN_VALUE && v <= Short.MAX_VALUE && (short) v == v) { out.write(DOUBLE_SHORT); out.writeShort((int) v); } else { out.write(DOUBLE_FULL); out.writeDouble(v); } return; } else if (clazz == Float.class) { float v = (Float) obj; if (v == -1f) out.write(FLOAT_MINUS_1); else if (v == 0f) out.write(FLOAT_0); else if (v == 1f) out.write(FLOAT_1); else if (v >= 0 && v <= 255 && (int) v == v) { out.write(FLOAT_255); out.write((int) v); } else if (v >= Short.MIN_VALUE && v <= Short.MAX_VALUE && (short) v == v) { out.write(FLOAT_SHORT); out.writeShort((int) v); } else { out.write(FLOAT_FULL); out.writeFloat(v); } return; } else if (clazz == BigInteger.class) { out.write(BIGINTEGER); byte[] buf = ((BigInteger) obj).toByteArray(); serializeByteArrayInt(out, buf); return; } else if (clazz == BigDecimal.class) { out.write(BIGDECIMAL); BigDecimal d = (BigDecimal) obj; serializeByteArrayInt(out, d.unscaledValue().toByteArray()); LongPacker.packInt(out, d.scale()); return; } else if (clazz == Long.class) { final long val = (Long) obj; writeLong(out, val); return; } else if (clazz == Short.class) { short val = (Short) obj; if (val == -1) 
out.write(SHORT_MINUS_1); else if (val == 0) out.write(SHORT_0); else if (val == 1) out.write(SHORT_1); else if (val > 0 && val < 255) { out.write(SHORT_255); out.write(val); } else { out.write(SHORT_FULL); out.writeShort(val); } return; } else if (clazz == Byte.class) { byte val = (Byte) obj; if (val == -1) out.write(BYTE_MINUS_1); else if (val == 0) out.write(BYTE_0); else if (val == 1) out.write(BYTE_1); else { out.write(BYTE_FULL); out.writeByte(val); } return; } else if (clazz == Character.class) { out.write(CHAR); out.writeChar((Character) obj); return; } else if (clazz == String.class) { String s = (String) obj; if (s.length() == 0) { out.write(STRING_EMPTY); } else { out.write(STRING); serializeString(out, s); } return; } else if (obj instanceof Class) { out.write(CLASS); serialize(out, ((Class) obj).getName()); return; } else if (obj instanceof int[]) { writeIntArray(out, (int[]) obj); return; } else if (obj instanceof long[]) { writeLongArray(out, (long[]) obj); return; } else if (obj instanceof short[]) { out.write(SHORT_ARRAY); short[] a = (short[]) obj; LongPacker.packInt(out,a.length); for(short s:a) out.writeShort(s); return; } else if (obj instanceof boolean[]) { out.write(BOOLEAN_ARRAY); boolean[] a = (boolean[]) obj; LongPacker.packInt(out,a.length); for(boolean s:a) out.writeBoolean(s); //TODO pack 8 booleans to single byte return; } else if (obj instanceof double[]) { out.write(DOUBLE_ARRAY); double[] a = (double[]) obj; LongPacker.packInt(out,a.length); for(double s:a) out.writeDouble(s); return; } else if (obj instanceof float[]) { out.write(FLOAT_ARRAY); float[] a = (float[]) obj; LongPacker.packInt(out,a.length); for(float s:a) out.writeFloat(s); return; } else if (obj instanceof char[]) { out.write(CHAR_ARRAY); char[] a = (char[]) obj; LongPacker.packInt(out,a.length); for(char s:a) out.writeChar(s); return; } else if (obj instanceof byte[]) { byte[] b = (byte[]) obj; out.write(ARRAY_BYTE_INT); serializeByteArrayInt(out, b); return; } else if (clazz == Date.class) { out.write(DATE); out.writeLong(((Date) obj).getTime()); return; } else if (clazz == UUID.class) { out.write(UUID); serializeUUID(out,(UUID) obj); return; } else if (clazz == BTree.class) { out.write(BTREE); ((BTree) obj).writeExternal(out); return; } else if (clazz == HTree.class) { out.write(HTREE); ((HTree) obj).serialize(out); return; } else if (clazz == LinkedList2.class) { out.write(JDBMLINKEDLIST); ((LinkedList2) obj).serialize(out); return; } /** classes bellow need object stack, so initialize it if not alredy initialized*/ if (objectStack == null) { objectStack = new FastArrayList(); objectStack.add(obj); } if (obj instanceof Object[]) { Object[] b = (Object[]) obj; boolean packableLongs = b.length <= 255; if (packableLongs) { //check if it contains packable longs for (Object o : b) { if (o != null && (o.getClass() != Long.class || (((Long) o).longValue() < 0 && ((Long) o).longValue() != Long.MAX_VALUE))) { packableLongs = false; break; } } } if (packableLongs) { //packable Longs is special case, it is often used in JDBM to reference fields out.write(ARRAY_OBJECT_PACKED_LONG); out.write(b.length); for (Object o : b) { if (o == null) LongPacker.packLong(out, 0); else LongPacker.packLong(out, ((Long) o).longValue() + 1); } } else { out.write(ARRAY_OBJECT); LongPacker.packInt(out, b.length); // Write class id for components Class componentType = obj.getClass().getComponentType(); registerClass(componentType); //write class header int classId = getClassId(componentType); LongPacker.packInt(out, 
classId); for (Object o : b) serialize(out, o, objectStack); } } else if (clazz == ArrayList.class) { ArrayList l = (ArrayList) obj; boolean packableLongs = l.size() < 255; if (packableLongs) { //packable Longs is special case, it is often used in JDBM to reference fields for (Object o : l) { if (o != null && (o.getClass() != Long.class || (((Long) o).longValue() < 0 && ((Long) o).longValue() != Long.MAX_VALUE))) { packableLongs = false; break; } } } if (packableLongs) { out.write(ARRAYLIST_PACKED_LONG); out.write(l.size()); for (Object o : l) { if (o == null) LongPacker.packLong(out, 0); else LongPacker.packLong(out, ((Long) o).longValue() + 1); } } else { serializeCollection(ARRAYLIST, out, obj, objectStack); } } else if (clazz == java.util.LinkedList.class) { serializeCollection(LINKEDLIST, out, obj, objectStack); } else if (clazz == Vector.class) { serializeCollection(VECTOR, out, obj, objectStack); } else if (clazz == TreeSet.class) { TreeSet l = (TreeSet) obj; out.write(TREESET); LongPacker.packInt(out, l.size()); serialize(out, l.comparator(), objectStack); for (Object o : l) serialize(out, o, objectStack); } else if (clazz == HashSet.class) { serializeCollection(HASHSET, out, obj, objectStack); } else if (clazz == LinkedHashSet.class) { serializeCollection(LINKEDHASHSET, out, obj, objectStack); } else if (clazz == TreeMap.class) { TreeMap l = (TreeMap) obj; out.write(TREEMAP); LongPacker.packInt(out, l.size()); serialize(out, l.comparator(), objectStack); for (Object o : l.keySet()) { serialize(out, o, objectStack); serialize(out, l.get(o), objectStack); } } else if (clazz == HashMap.class) { serializeMap(HASHMAP, out, obj, objectStack); } else if (clazz == IdentityHashMap.class) { serializeMap(IDENTITYHASHMAP, out, obj, objectStack); } else if (clazz == LinkedHashMap.class) { serializeMap(LINKEDHASHMAP, out, obj, objectStack); } else if (clazz == Hashtable.class) { serializeMap(HASHTABLE, out, obj, objectStack); } else if (clazz == Properties.class) { serializeMap(PROPERTIES, out, obj, objectStack); } else if (clazz == Locale.class){ out.write(LOCALE); Locale l = (Locale) obj; out.writeUTF(l.getLanguage()); out.writeUTF(l.getCountry()); out.writeUTF(l.getVariant()); } else { out.write(NORMAL); writeObject(out, obj, objectStack); } } static void serializeString(DataOutput out, String obj) throws IOException { final int len = obj.length(); LongPacker.packInt(out, len); for (int i = 0; i < len; i++) { int c = (int) obj.charAt(i); //TODO investigate if c could be negative here LongPacker.packInt(out, c); } } private void serializeUUID(DataOutput out, UUID uuid) throws IOException { out.writeLong(uuid.getMostSignificantBits()); out.writeLong(uuid.getLeastSignificantBits()); } private void serializeMap(int header, DataOutput out, Object obj, FastArrayList objectStack) throws IOException { Map l = (Map) obj; out.write(header); LongPacker.packInt(out, l.size()); for (Object o : l.keySet()) { serialize(out, o, objectStack); serialize(out, l.get(o), objectStack); } } private void serializeCollection(int header, DataOutput out, Object obj, FastArrayList objectStack) throws IOException { Collection l = (Collection) obj; out.write(header); LongPacker.packInt(out, l.size()); for (Object o : l) serialize(out, o, objectStack); } private void serializeByteArrayInt(DataOutput out, byte[] b) throws IOException { LongPacker.packInt(out, b.length); out.write(b); } private void writeLongArray(DataOutput da, long[] obj) throws IOException { long max = Long.MIN_VALUE; long min = Long.MAX_VALUE; for (long 
i : obj) { max = Math.max(max, i); min = Math.min(min, i); } if (0 <= min && max <= 255) { da.write(ARRAY_LONG_B); LongPacker.packInt(da, obj.length); for (long l : obj) da.write((int) l); } else if (0 <= min && max <= Long.MAX_VALUE) { da.write(ARRAY_LONG_PACKED); LongPacker.packInt(da, obj.length); for (long l : obj) LongPacker.packLong(da, l); } else if (Short.MIN_VALUE <= min && max <= Short.MAX_VALUE) { da.write(ARRAY_LONG_S); LongPacker.packInt(da, obj.length); for (long l : obj) da.writeShort((short) l); } else if (Integer.MIN_VALUE <= min && max <= Integer.MAX_VALUE) { da.write(ARRAY_LONG_I); LongPacker.packInt(da, obj.length); for (long l : obj) da.writeInt((int) l); } else { da.write(ARRAY_LONG_L); LongPacker.packInt(da, obj.length); for (long l : obj) da.writeLong(l); } } private void writeIntArray(DataOutput da, int[] obj) throws IOException { int max = Integer.MIN_VALUE; int min = Integer.MAX_VALUE; for (int i : obj) { max = Math.max(max, i); min = Math.min(min, i); } boolean fitsInByte = 0 <= min && max <= 255; boolean fitsInShort = Short.MIN_VALUE >= min && max <= Short.MAX_VALUE; if (obj.length <= 255 && fitsInByte) { da.write(ARRAY_INT_B_255); da.write(obj.length); for (int i : obj) da.write(i); } else if (fitsInByte) { da.write(ARRAY_INT_B_INT); LongPacker.packInt(da, obj.length); for (int i : obj) da.write(i); } else if (0 <= min && max <= Integer.MAX_VALUE) { da.write(ARRAY_INT_PACKED); LongPacker.packInt(da, obj.length); for (int l : obj) LongPacker.packInt(da, l); } else if (fitsInShort) { da.write(ARRAY_INT_S); LongPacker.packInt(da, obj.length); for (int i : obj) da.writeShort(i); } else { da.write(ARRAY_INT_I); LongPacker.packInt(da, obj.length); for (int i : obj) da.writeInt(i); } } private void writeInteger(DataOutput da, final int val) throws IOException { if (val == -1) da.write(INTEGER_MINUS_1); else if (val == 0) da.write(INTEGER_0); else if (val == 1) da.write(INTEGER_1); else if (val == 2) da.write(INTEGER_2); else if (val == 3) da.write(INTEGER_3); else if (val == 4) da.write(INTEGER_4); else if (val == 5) da.write(INTEGER_5); else if (val == 6) da.write(INTEGER_6); else if (val == 7) da.write(INTEGER_7); else if (val == 8) da.write(INTEGER_8); else if (val == Integer.MIN_VALUE) da.write(INTEGER_MINUS_MAX); else if (val > 0 && val < 255) { da.write(INTEGER_255); da.write(val); } else if (val < 0) { da.write(INTEGER_PACK_NEG); LongPacker.packInt(da, -val); } else { da.write(INTEGER_PACK); LongPacker.packInt(da, val); } } private void writeLong(DataOutput da, final long val) throws IOException { if (val == -1) da.write(LONG_MINUS_1); else if (val == 0) da.write(LONG_0); else if (val == 1) da.write(LONG_1); else if (val == 2) da.write(LONG_2); else if (val == 3) da.write(LONG_3); else if (val == 4) da.write(LONG_4); else if (val == 5) da.write(LONG_5); else if (val == 6) da.write(LONG_6); else if (val == 7) da.write(LONG_7); else if (val == 8) da.write(LONG_8); else if (val == Long.MIN_VALUE) da.write(LONG_MINUS_MAX); else if (val > 0 && val < 255) { da.write(LONG_255); da.write((int) val); } else if (val < 0) { da.write(LONG_PACK_NEG); LongPacker.packLong(da, -val); } else { da.write(LONG_PACK); LongPacker.packLong(da, val); } } /** * Deserialize an object from a byte array * * @throws IOException * @throws ClassNotFoundException */ public Object deserialize(byte[] buf) throws ClassNotFoundException, IOException { DataInputOutput bs = new DataInputOutput(buf); Object ret = deserialize(bs); if (bs.available() != 0) throw new InternalError("bytes left: " + 
bs.available()); return ret; } static String deserializeString(DataInput buf) throws IOException { int len = LongPacker.unpackInt(buf); char[] b = new char[len]; for (int i = 0; i < len; i++) b[i] = (char) LongPacker.unpackInt(buf); return new String(b); } public Object deserialize(DataInput is) throws IOException, ClassNotFoundException { return deserialize(is, null); } public Object deserialize(DataInput is, FastArrayList objectStack) throws IOException, ClassNotFoundException { Object ret = null; final int head = is.readUnsignedByte(); /** first try to deserialize object without allocating object stack*/ switch (head) { case NULL: break; case BOOLEAN_TRUE: ret = Boolean.TRUE; break; case BOOLEAN_FALSE: ret = Boolean.FALSE; break; case INTEGER_MINUS_1: ret = Integer.valueOf(-1); break; case INTEGER_0: ret = Integer.valueOf(0); break; case INTEGER_1: ret = Integer.valueOf(1); break; case INTEGER_2: ret = Integer.valueOf(2); break; case INTEGER_3: ret = Integer.valueOf(3); break; case INTEGER_4: ret = Integer.valueOf(4); break; case INTEGER_5: ret = Integer.valueOf(5); break; case INTEGER_6: ret = Integer.valueOf(6); break; case INTEGER_7: ret = Integer.valueOf(7); break; case INTEGER_8: ret = Integer.valueOf(8); break; case INTEGER_MINUS_MAX: ret = Integer.valueOf(Integer.MIN_VALUE); break; case INTEGER_255: ret = Integer.valueOf(is.readUnsignedByte()); break; case INTEGER_PACK_NEG: ret = Integer.valueOf(-LongPacker.unpackInt(is)); break; case INTEGER_PACK: ret = Integer.valueOf(LongPacker.unpackInt(is)); break; case LONG_MINUS_1: ret = Long.valueOf(-1); break; case LONG_0: ret = Long.valueOf(0); break; case LONG_1: ret = Long.valueOf(1); break; case LONG_2: ret = Long.valueOf(2); break; case LONG_3: ret = Long.valueOf(3); break; case LONG_4: ret = Long.valueOf(4); break; case LONG_5: ret = Long.valueOf(5); break; case LONG_6: ret = Long.valueOf(6); break; case LONG_7: ret = Long.valueOf(7); break; case LONG_8: ret = Long.valueOf(8); break; case LONG_255: ret = Long.valueOf(is.readUnsignedByte()); break; case LONG_PACK_NEG: ret = Long.valueOf(-LongPacker.unpackLong(is)); break; case LONG_PACK: ret = Long.valueOf(LongPacker.unpackLong(is)); break; case LONG_MINUS_MAX: ret = Long.valueOf(Long.MIN_VALUE); break; case SHORT_MINUS_1: ret = Short.valueOf((short) -1); break; case SHORT_0: ret = Short.valueOf((short) 0); break; case SHORT_1: ret = Short.valueOf((short) 1); break; case SHORT_255: ret = Short.valueOf((short) is.readUnsignedByte()); break; case SHORT_FULL: ret = Short.valueOf(is.readShort()); break; case BYTE_MINUS_1: ret = Byte.valueOf((byte) -1); break; case BYTE_0: ret = Byte.valueOf((byte) 0); break; case BYTE_1: ret = Byte.valueOf((byte) 1); break; case BYTE_FULL: ret = Byte.valueOf(is.readByte()); break; case SHORT_ARRAY: int size = LongPacker.unpackInt(is); ret = new short[size]; for(int i=0;i deserializeArrayList(DataInput is, FastArrayList objectStack) throws IOException, ClassNotFoundException { int size = LongPacker.unpackInt(is); ArrayList s = new ArrayList(size); objectStack.add(s); for (int i = 0; i < size; i++) { s.add(deserialize(is, objectStack)); } return s; } private ArrayList deserializeArrayListPackedLong(DataInput is) throws IOException, ClassNotFoundException { int size = is.readUnsignedByte(); if (size < 0) throw new EOFException(); ArrayList s = new ArrayList(size); for (int i = 0; i < size; i++) { long l = LongPacker.unpackLong(is); if (l == 0) s.add(null); else s.add(Long.valueOf(l - 1)); } return s; } private java.util.LinkedList 
deserializeLinkedList(DataInput is, FastArrayList objectStack) throws IOException, ClassNotFoundException { int size = LongPacker.unpackInt(is); java.util.LinkedList s = new java.util.LinkedList(); objectStack.add(s); for (int i = 0; i < size; i++) s.add(deserialize(is, objectStack)); return s; } private Vector deserializeVector(DataInput is, FastArrayList objectStack) throws IOException, ClassNotFoundException { int size = LongPacker.unpackInt(is); Vector s = new Vector(size); objectStack.add(s); for (int i = 0; i < size; i++) s.add(deserialize(is, objectStack)); return s; } private HashSet deserializeHashSet(DataInput is, FastArrayList objectStack) throws IOException, ClassNotFoundException { int size = LongPacker.unpackInt(is); HashSet s = new HashSet(size); objectStack.add(s); for (int i = 0; i < size; i++) s.add(deserialize(is, objectStack)); return s; } private LinkedHashSet deserializeLinkedHashSet(DataInput is, FastArrayList objectStack) throws IOException, ClassNotFoundException { int size = LongPacker.unpackInt(is); LinkedHashSet s = new LinkedHashSet(size); objectStack.add(s); for (int i = 0; i < size; i++) s.add(deserialize(is, objectStack)); return s; } private TreeSet deserializeTreeSet(DataInput is, FastArrayList objectStack) throws IOException, ClassNotFoundException { int size = LongPacker.unpackInt(is); TreeSet s = new TreeSet(); objectStack.add(s); Comparator comparator = (Comparator) deserialize(is, objectStack); if (comparator != null) s = new TreeSet(comparator); for (int i = 0; i < size; i++) s.add(deserialize(is, objectStack)); return s; } private TreeMap deserializeTreeMap(DataInput is, FastArrayList objectStack) throws IOException, ClassNotFoundException { int size = LongPacker.unpackInt(is); TreeMap s = new TreeMap(); objectStack.add(s); Comparator comparator = (Comparator) deserialize(is, objectStack); if (comparator != null) s = new TreeMap(comparator); for (int i = 0; i < size; i++) s.put(deserialize(is, objectStack), deserialize(is, objectStack)); return s; } private HashMap deserializeHashMap(DataInput is, FastArrayList objectStack) throws IOException, ClassNotFoundException { int size = LongPacker.unpackInt(is); HashMap s = new HashMap(size); objectStack.add(s); for (int i = 0; i < size; i++) s.put(deserialize(is, objectStack), deserialize(is, objectStack)); return s; } private IdentityHashMap deserializeIdentityHashMap(DataInput is, FastArrayList objectStack) throws IOException, ClassNotFoundException { int size = LongPacker.unpackInt(is); IdentityHashMap s = new IdentityHashMap(size); objectStack.add(s); for (int i = 0; i < size; i++) s.put(deserialize(is, objectStack), deserialize(is, objectStack)); return s; } private LinkedHashMap deserializeLinkedHashMap(DataInput is, FastArrayList objectStack) throws IOException, ClassNotFoundException { int size = LongPacker.unpackInt(is); LinkedHashMap s = new LinkedHashMap(size); objectStack.add(s); for (int i = 0; i < size; i++) s.put(deserialize(is, objectStack), deserialize(is, objectStack)); return s; } private Hashtable deserializeHashtable(DataInput is, FastArrayList objectStack) throws IOException, ClassNotFoundException { int size = LongPacker.unpackInt(is); Hashtable s = new Hashtable(size); objectStack.add(s); for (int i = 0; i < size; i++) s.put(deserialize(is, objectStack), deserialize(is, objectStack)); return s; } private Properties deserializeProperties(DataInput is, FastArrayList objectStack) throws IOException, ClassNotFoundException { int size = LongPacker.unpackInt(is); Properties s = new 
Properties(); objectStack.add(s); for (int i = 0; i < size; i++) s.put(deserialize(is, objectStack), deserialize(is, objectStack)); return s; } /** * Utility class similar to ArrayList, but with fast identity search. */ static class FastArrayList { private int size = 0; private K[] elementData = (K[]) new Object[8]; K get(int index) { if (index >= size) throw new IndexOutOfBoundsException(); return elementData[index]; } void add(K o) { if (elementData.length == size) { //grow array if necessary elementData = Arrays.copyOf(elementData, elementData.length * 2); } elementData[size] = o; size++; } int size() { return size; } /** * This method is reason why ArrayList is not used. * Search an item in list and returns its index. * It uses identity rather than 'equalsTo' * One could argue that TreeMap should be used instead, * but we do not expect large object trees. * This search is VERY FAST compared to Maps, it does not allocate * new instances or uses method calls. * * @param obj * @return index of object in list or -1 if not found */ int identityIndexOf(Object obj) { for (int i = 0; i < size; i++) { if (obj == elementData[i]) return i; } return -1; } } } ================================================ FILE: src/main/java/org/apache/jdbm/SerializationHeader.java ================================================ package org.apache.jdbm; /** * Header byte, is used at start of each record to indicate data type * WARNING !!! values bellow must be unique !!!!! */ final class SerializationHeader { final static int NULL = 0; final static int NORMAL = 1; final static int BOOLEAN_TRUE = 2; final static int BOOLEAN_FALSE = 3; final static int INTEGER_MINUS_1 = 4; final static int INTEGER_0 = 5; final static int INTEGER_1 = 6; final static int INTEGER_2 = 7; final static int INTEGER_3 = 8; final static int INTEGER_4 = 9; final static int INTEGER_5 = 10; final static int INTEGER_6 = 11; final static int INTEGER_7 = 12; final static int INTEGER_8 = 13; final static int INTEGER_255 = 14; final static int INTEGER_PACK_NEG = 15; final static int INTEGER_PACK = 16; final static int LONG_MINUS_1 = 17; final static int LONG_0 = 18; final static int LONG_1 = 19; final static int LONG_2 = 20; final static int LONG_3 = 21; final static int LONG_4 = 22; final static int LONG_5 = 23; final static int LONG_6 = 24; final static int LONG_7 = 25; final static int LONG_8 = 26; final static int LONG_PACK_NEG = 27; final static int LONG_PACK = 28; final static int LONG_255 = 29; final static int LONG_MINUS_MAX = 30; final static int SHORT_MINUS_1 = 31; final static int SHORT_0 = 32; final static int SHORT_1 = 33; final static int SHORT_255 = 34; final static int SHORT_FULL = 35; final static int BYTE_MINUS_1 = 36; final static int BYTE_0 = 37; final static int BYTE_1 = 38; final static int BYTE_FULL = 39; final static int CHAR = 40; final static int FLOAT_MINUS_1 = 41; final static int FLOAT_0 = 42; final static int FLOAT_1 = 43; final static int FLOAT_255 = 44; final static int FLOAT_SHORT = 45; final static int FLOAT_FULL = 46; final static int DOUBLE_MINUS_1 = 47; final static int DOUBLE_0 = 48; final static int DOUBLE_1 = 49; final static int DOUBLE_255 = 50; final static int DOUBLE_SHORT = 51; final static int DOUBLE_FULL = 52; final static int DOUBLE_ARRAY = 53; final static int BIGDECIMAL = 54; final static int BIGINTEGER = 55; final static int FLOAT_ARRAY = 56; final static int INTEGER_MINUS_MAX = 57; final static int SHORT_ARRAY = 58; final static int BOOLEAN_ARRAY = 59; final static int ARRAY_INT_B_255 = 60; final 
static int ARRAY_INT_B_INT = 61; final static int ARRAY_INT_S = 62; final static int ARRAY_INT_I = 63; final static int ARRAY_INT_PACKED = 64; final static int ARRAY_LONG_B = 65; final static int ARRAY_LONG_S = 66; final static int ARRAY_LONG_I = 67; final static int ARRAY_LONG_L = 68; final static int ARRAY_LONG_PACKED = 69; final static int CHAR_ARRAY = 70; final static int ARRAY_BYTE_INT = 71; final static int NOTUSED_ARRAY_OBJECT_255 = 72; final static int ARRAY_OBJECT = 73; //special cases for BTree values which stores references final static int ARRAY_OBJECT_PACKED_LONG = 74; final static int ARRAYLIST_PACKED_LONG = 75; final static int STRING_EMPTY = 101; final static int NOTUSED_STRING_255 = 102; final static int STRING = 103; final static int NOTUSED_ARRAYLIST_255 = 104; final static int ARRAYLIST = 105; final static int NOTUSED_TREEMAP_255 = 106; final static int TREEMAP = 107; final static int NOTUSED_HASHMAP_255 = 108; final static int HASHMAP = 109; final static int NOTUSED_LINKEDHASHMAP_255 = 110; final static int LINKEDHASHMAP = 111; final static int NOTUSED_TREESET_255 = 112; final static int TREESET = 113; final static int NOTUSED_HASHSET_255 = 114; final static int HASHSET = 115; final static int NOTUSED_LINKEDHASHSET_255 = 116; final static int LINKEDHASHSET = 117; final static int NOTUSED_LINKEDLIST_255 = 118; final static int LINKEDLIST = 119; final static int NOTUSED_VECTOR_255 = 120; final static int VECTOR = 121; final static int IDENTITYHASHMAP = 122; final static int HASHTABLE = 123; final static int LOCALE = 124; final static int PROPERTIES = 125; final static int CLASS = 126; final static int DATE = 127; final static int UUID = 128; static final int JDBMLINKEDLIST = 159; static final int HTREE = 160; final static int BTREE = 161; static final int BTREE_NODE_LEAF = 162; static final int BTREE_NODE_NONLEAF = 163; static final int HTREE_BUCKET = 164; static final int HTREE_DIRECTORY = 165; /** * used for reference to already serialized object in object graph */ static final int OBJECT_STACK = 166; static final int JAVA_SERIALIZATION = 172; } ================================================ FILE: src/main/java/org/apache/jdbm/Serializer.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.*; /** * Interface used to provide a serialization mechanism other than a class' normal * serialization. * * @author Alex Boisvert */ public interface Serializer { /** * Serialize the content of an object into a byte array. * * @param out ObjectOutput to save object into * @param obj Object to serialize */ public void serialize(DataOutput out, A obj) throws IOException; /** * Deserialize the content of an object from a byte array. 
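* A minimal sketch of a custom implementation (hypothetical example, not part of JDBM):
*
*   Serializer<String> stringSerializer = new Serializer<String>() {
*       public void serialize(DataOutput out, String obj) throws IOException { out.writeUTF(obj); }
*       public String deserialize(DataInput in) throws IOException { return in.readUTF(); }
*   };
*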
* * @param in to read serialized data from * @return deserialized object * @throws IOException * @throws ClassNotFoundException */ public A deserialize(DataInput in) throws IOException, ClassNotFoundException; } ================================================ FILE: src/main/java/org/apache/jdbm/Storage.java ================================================ package org.apache.jdbm; import java.io.*; import java.nio.ByteBuffer; /** * */ interface Storage { /** * Bit shift used to calculate page size. * If you want to modify page size, do it here. * * 1<<9 = 512 * 1<<10 = 1024 * 1<<11 = 2048 * 1<<12 = 4096 */ int PAGE_SIZE_SHIFT = 12; /** * the length of a single page. *

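* With the default PAGE_SIZE_SHIFT of 12 this is 4096 bytes. Record ids pack the page number
* into the high bits and the offset within the page into the low PAGE_SIZE_SHIFT bits, so code
* such as PhysicalRowIdManager recovers them with 'rowid >>> PAGE_SIZE_SHIFT' and
* 'rowid & OFFSET_MASK'.
*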
*!!! DO NOT MODIFY THI DIRECTLY !!! */ int PAGE_SIZE = 1<< PAGE_SIZE_SHIFT; /** * use 'val & OFFSET_MASK' to quickly get offset within the page; */ long OFFSET_MASK = 0xFFFFFFFFFFFFFFFFL >>> (64-Storage.PAGE_SIZE_SHIFT); void write(long pageNumber, ByteBuffer data) throws IOException; ByteBuffer read(long pageNumber) throws IOException; void forceClose() throws IOException; boolean isReadonly(); DataInputStream readTransactionLog(); void deleteTransactionLog(); void sync() throws IOException; DataOutputStream openTransactionLog() throws IOException; void deleteAllFiles() throws IOException; } ================================================ FILE: src/main/java/org/apache/jdbm/StorageDisk.java ================================================ package org.apache.jdbm; import java.io.*; import java.nio.ByteBuffer; import java.nio.channels.OverlappingFileLockException; import java.util.ArrayList; import java.util.List; import static org.apache.jdbm.StorageDiskMapped.*; /** * Storage which used files on disk to store data */ class StorageDisk implements Storage { private ArrayList rafs = new ArrayList(); private ArrayList rafsTranslation = new ArrayList(); private String fileName; private long lastPageNumber = Long.MIN_VALUE; private boolean readonly; private boolean lockingDisabled; public StorageDisk(String fileName,boolean readonly, boolean lockingDisabled) throws IOException { this.fileName = fileName; this.readonly = readonly; this.lockingDisabled = lockingDisabled; //make sure first file can be opened //lock it try { if(!readonly && !lockingDisabled) getRaf(0).getChannel().tryLock(); } catch (IOException e) { throw new IOException("Could not lock DB file: " + fileName, e); } catch (OverlappingFileLockException e) { throw new IOException("Could not lock DB file: " + fileName, e); } } RandomAccessFile getRaf(long pageNumber) throws IOException { int fileNumber = (int) (Math.abs(pageNumber)/PAGES_PER_FILE ); List c = pageNumber>=0 ? 
rafs : rafsTranslation; //increase capacity of array lists if needed for (int i = c.size(); i <= fileNumber; i++) { c.add(null); } RandomAccessFile ret = c.get(fileNumber); if (ret == null) { String name = StorageDiskMapped.makeFileName(fileName, pageNumber, fileNumber); ret = new RandomAccessFile(name, readonly?"r":"rw"); c.set(fileNumber, ret); } return ret; } public void write(long pageNumber, ByteBuffer data) throws IOException { if (data.capacity() != PAGE_SIZE) throw new IllegalArgumentException(); long offset = pageNumber * PAGE_SIZE; RandomAccessFile file = getRaf(pageNumber); // if (lastPageNumber + 1 != pageNumber) //TODO cache position again, so seek is not necessary file.seek(Math.abs(offset % (PAGES_PER_FILE* PAGE_SIZE))); file.write(data.array()); lastPageNumber = pageNumber; } public ByteBuffer read(long pageNumber) throws IOException { long offset = pageNumber * PAGE_SIZE; ByteBuffer buffer = ByteBuffer.allocate(PAGE_SIZE); RandomAccessFile file = getRaf(pageNumber); // if (lastPageNumber + 1 != pageNumber) //TODO cache position again, so seek is not necessary file.seek(Math.abs(offset % (PAGES_PER_FILE* PAGE_SIZE))); int remaining = buffer.limit(); int pos = 0; while (remaining > 0) { int read = file.read(buffer.array(), pos, remaining); if (read == -1) { System.arraycopy(PageFile.CLEAN_DATA, 0, buffer.array(), pos, remaining); break; } remaining -= read; pos += read; } lastPageNumber = pageNumber; return buffer; } static final String transaction_log_file_extension = ".t"; public DataOutputStream openTransactionLog() throws IOException { String logName = fileName + transaction_log_file_extension; final FileOutputStream fileOut = new FileOutputStream(logName); return new DataOutputStream(new BufferedOutputStream(fileOut)) { //default implementation of flush on FileOutputStream does nothing, //so we use little workaround to make sure that data were really flushed public void flush() throws IOException { super.flush(); fileOut.flush(); fileOut.getFD().sync(); } }; } public void deleteAllFiles() { deleteTransactionLog(); StorageDiskMapped.deleteFiles(fileName); } /** * Synchronizes the file. 
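* Flushes every open data and translation RandomAccessFile to disk via FileDescriptor.sync().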
*/ public void sync() throws IOException { for (RandomAccessFile file : rafs) if (file != null) file.getFD().sync(); for (RandomAccessFile file : rafsTranslation) if (file != null) file.getFD().sync(); } public void forceClose() throws IOException { for (RandomAccessFile f : rafs) { if (f != null) f.close(); } rafs = null; for (RandomAccessFile f : rafsTranslation) { if (f != null) f.close(); } rafsTranslation = null; } public DataInputStream readTransactionLog() { File logFile = new File(fileName + transaction_log_file_extension); if (!logFile.exists()) return null; if (logFile.length() == 0) { logFile.delete(); return null; } DataInputStream ois = null; try { ois = new DataInputStream(new BufferedInputStream(new FileInputStream(logFile))); } catch (FileNotFoundException e) { //file should exists, we check for its presents just a miliseconds yearlier, anyway move on return null; } try { if (ois.readShort() != Magic.LOGFILE_HEADER) throw new Error("Bad magic on log file"); } catch (IOException e) { // corrupted/empty logfile logFile.delete(); return null; } return ois; } public void deleteTransactionLog() { File logFile = new File(fileName + transaction_log_file_extension); if (logFile.exists()) logFile.delete(); } public boolean isReadonly() { return false; } } ================================================ FILE: src/main/java/org/apache/jdbm/StorageDiskMapped.java ================================================ package org.apache.jdbm; import sun.misc.Cleaner; import java.io.*; import java.nio.ByteBuffer; import java.nio.MappedByteBuffer; import java.nio.channels.FileChannel; import java.nio.channels.OverlappingFileLockException; import java.util.ArrayList; import java.util.IdentityHashMap; import java.util.List; /** * Disk storage which uses mapped buffers */ class StorageDiskMapped implements Storage { static final String IDR = ".i"; static final String DBR = ".d"; /** * Maximal number of pages in single file. * Calculated so that each file will have 1 GB */ final static long PAGES_PER_FILE = (1024*1024*1024)>>>Storage.PAGE_SIZE_SHIFT; private ArrayList channels = new ArrayList(); private ArrayList channelsTranslation = new ArrayList(); private IdentityHashMap buffers = new IdentityHashMap(); private String fileName; private boolean transactionsDisabled; private boolean readonly; private boolean lockingDisabled; public StorageDiskMapped(String fileName, boolean readonly, boolean transactionsDisabled, boolean lockingDisabled) throws IOException { this.fileName = fileName; this.transactionsDisabled = transactionsDisabled; this.readonly = readonly; this.lockingDisabled = lockingDisabled; //make sure first file can be opened //lock it try { if(!lockingDisabled) getChannel(0).lock(); } catch (IOException e) { throw new IOException("Could not lock DB file: " + fileName, e); } catch (OverlappingFileLockException e) { throw new IOException("Could not lock DB file: " + fileName, e); } } private FileChannel getChannel(long pageNumber) throws IOException { int fileNumber = (int) (Math.abs(pageNumber)/PAGES_PER_FILE ); List c = pageNumber>=0 ? 
channels : channelsTranslation; //increase capacity of array lists if needed for (int i = c.size(); i <= fileNumber; i++) { c.add(null); } FileChannel ret = c.get(fileNumber); if (ret == null) { String name = makeFileName(fileName, pageNumber, fileNumber); ret = new RandomAccessFile(name, "rw").getChannel(); c.set(fileNumber, ret); buffers.put(ret, ret.map(FileChannel.MapMode.READ_WRITE, 0, ret.size())); } return ret; } static String makeFileName(String fileName, long pageNumber, int fileNumber) { return fileName + (pageNumber>=0 ? DBR : IDR) + "." + fileNumber; } public void write(long pageNumber, ByteBuffer data) throws IOException { if(transactionsDisabled && data.isDirect()){ //if transactions are disabled and this buffer is direct, //changes written into buffer are directly reflected in file. //so there is no need to write buffer second time return; } FileChannel f = getChannel(pageNumber); int offsetInFile = (int) ((Math.abs(pageNumber) % PAGES_PER_FILE)* PAGE_SIZE); MappedByteBuffer b = buffers.get(f); if( b.limit()<=offsetInFile){ //remapping buffer for each newly added page would be slow, //so allocate new size in chunks int increment = Math.min(PAGE_SIZE * 1024,offsetInFile/10); increment -= increment% PAGE_SIZE; long newFileSize = offsetInFile+ PAGE_SIZE + increment; newFileSize = Math.min(PAGES_PER_FILE * PAGE_SIZE, newFileSize); //expand file size f.position(newFileSize - 1); f.write(ByteBuffer.allocate(1)); //unmap old buffer unmapBuffer(b); //remap buffer b = f.map(FileChannel.MapMode.READ_WRITE, 0,newFileSize); buffers.put(f, b); } //write into buffer b.position(offsetInFile); data.rewind(); b.put(data); } private void unmapBuffer(MappedByteBuffer b) { if(b!=null){ Cleaner cleaner = ((sun.nio.ch.DirectBuffer) b).cleaner(); if(cleaner!=null) cleaner.clean(); } } public ByteBuffer read(long pageNumber) throws IOException { FileChannel f = getChannel(pageNumber); int offsetInFile = (int) ((Math.abs(pageNumber) % PAGES_PER_FILE)* PAGE_SIZE); MappedByteBuffer b = buffers.get(f); if(b == null){ //not mapped yet b = f.map(FileChannel.MapMode.READ_WRITE, 0, f.size()); } //check buffers size if(b.limit()<=offsetInFile){ //file is smaller, return empty data return ByteBuffer.wrap(PageFile.CLEAN_DATA).asReadOnlyBuffer(); } b.position(offsetInFile); ByteBuffer ret = b.slice(); ret.limit(PAGE_SIZE); if(!transactionsDisabled||readonly){ // changes written into buffer will be directly written into file // so we need to protect buffer from modifications ret = ret.asReadOnlyBuffer(); } return ret; } public void forceClose() throws IOException { for(FileChannel f: channels){ if(f==null) continue; f.close(); unmapBuffer(buffers.get(f)); } for(FileChannel f: channelsTranslation){ if(f==null) continue; f.close(); unmapBuffer(buffers.get(f)); } channels = null; channelsTranslation = null; buffers = null; } public void sync() throws IOException { for(MappedByteBuffer b: buffers.values()){ b.force(); } } public DataOutputStream openTransactionLog() throws IOException { String logName = fileName + StorageDisk.transaction_log_file_extension; final FileOutputStream fileOut = new FileOutputStream(logName); return new DataOutputStream(new BufferedOutputStream(fileOut)) { //default implementation of flush on FileOutputStream does nothing, //so we use little workaround to make sure that data were really flushed public void flush() throws IOException { super.flush(); fileOut.flush(); fileOut.getFD().sync(); } }; } public void deleteAllFiles() throws IOException { deleteTransactionLog(); 
deleteFiles(fileName); } static void deleteFiles(String fileName) { for(int i = 0; true; i++){ String name = makeFileName(fileName,+1, i); File f =new File(name); boolean exists = f.exists(); if(exists && !f.delete()) f.deleteOnExit(); if(!exists) break; } for(int i = 0; true; i++){ String name = makeFileName(fileName,-1, i); File f =new File(name); boolean exists = f.exists(); if(exists && !f.delete()) f.deleteOnExit(); if(!exists) break; } } public DataInputStream readTransactionLog() { File logFile = new File(fileName + StorageDisk.transaction_log_file_extension); if (!logFile.exists()) return null; if (logFile.length() == 0) { logFile.delete(); return null; } DataInputStream ois = null; try { ois = new DataInputStream(new BufferedInputStream(new FileInputStream(logFile))); } catch (FileNotFoundException e) { //file should exists, we check for its presents just a miliseconds yearlier, anyway move on return null; } try { if (ois.readShort() != Magic.LOGFILE_HEADER) throw new Error("Bad magic on log file"); } catch (IOException e) { // corrupted/empty logfile logFile.delete(); return null; } return ois; } public void deleteTransactionLog() { File logFile = new File(fileName + StorageDisk.transaction_log_file_extension); if (logFile.exists()) logFile.delete(); } public boolean isReadonly() { return readonly; } } ================================================ FILE: src/main/java/org/apache/jdbm/StorageMemory.java ================================================ package org.apache.jdbm; import java.io.*; import java.nio.ByteBuffer; /** * Storage which keeps all data in memory. * Data are lost after storage is closed. */ class StorageMemory implements Storage { private LongHashMap pages = new LongHashMap(); private boolean transactionsDisabled; StorageMemory(boolean transactionsDisabled){ this.transactionsDisabled = transactionsDisabled; } public ByteBuffer read(long pageNumber) throws IOException { byte[] data = pages.get(pageNumber); if (data == null) { //out of bounds, so just return empty data return ByteBuffer.wrap(PageFile.CLEAN_DATA).asReadOnlyBuffer(); }else{ ByteBuffer b = ByteBuffer.wrap(data); if(!transactionsDisabled) return b.asReadOnlyBuffer(); else return b; } } public void write(long pageNumber, ByteBuffer data) throws IOException { if (data.capacity() != PAGE_SIZE) throw new IllegalArgumentException(); byte[] b = pages.get(pageNumber); if(transactionsDisabled && data.hasArray() && data.array() == b){ //already putted directly into array return; } if(b == null) b = new byte[PAGE_SIZE]; data.position(0); data.get(b,0, PAGE_SIZE); pages.put(pageNumber,b); } public void sync() throws IOException { } public void forceClose() throws IOException { pages = null; } private ByteArrayOutputStream transLog; public DataInputStream readTransactionLog() { if (transLog == null) return null; DataInputStream ret = new DataInputStream( new ByteArrayInputStream(transLog.toByteArray())); //read stream header try { ret.readShort(); } catch (IOException e) { throw new IOError(e); } return ret; } public void deleteTransactionLog() { transLog = null; } public DataOutputStream openTransactionLog() throws IOException { if (transLog == null) transLog = new ByteArrayOutputStream(); return new DataOutputStream(transLog); } public void deleteAllFiles() throws IOException { } public boolean isReadonly() { return false; } } ================================================ FILE: src/main/java/org/apache/jdbm/StorageZip.java ================================================ package org.apache.jdbm; import 
java.io.*; import java.nio.ByteBuffer; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; /** * A read-only storage which reads data from compressed zip archive. *

* To improve performance with compressed archives * each page is stored in separate file (zip archive entry). */ class StorageZip implements Storage { private String zip; private String zip2; private ZipFile z; StorageZip(String zipFile) throws IOException { zip = zipFile; z = new ZipFile(zip); zip2 = "db"; } public void write(long pageNumber, ByteBuffer data) throws IOException { throw new UnsupportedOperationException("readonly"); } public ByteBuffer read(long pageNumber) throws IOException { ByteBuffer data = ByteBuffer.allocate(PAGE_SIZE); ZipEntry e = z.getEntry(zip2 + pageNumber); if(e == null) return ByteBuffer.wrap(PageFile.CLEAN_DATA).asReadOnlyBuffer(); InputStream i = z.getInputStream(e); new DataInputStream(i).readFully(data.array()); i.close(); return data; } public void forceClose() throws IOException { z.close(); z = null; } public DataInputStream readTransactionLog() { throw new UnsupportedOperationException("readonly"); } public void deleteTransactionLog() { throw new UnsupportedOperationException("readonly"); } public void sync() throws IOException { throw new UnsupportedOperationException("readonly"); } public DataOutputStream openTransactionLog() throws IOException { throw new UnsupportedOperationException("readonly"); } public void deleteAllFiles() throws IOException { } public boolean isReadonly() { return true; } } ================================================ FILE: src/main/java/org/apache/jdbm/Utils.java ================================================ package org.apache.jdbm; import javax.crypto.Cipher; import java.io.DataInput; import java.io.DataOutput; import java.io.IOError; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Comparator; /** * Various utilities used in JDBM */ class Utils { /** * empty string is used as dummy value to represent null values in HashSet and TreeSet */ static final String EMPTY_STRING = ""; public static byte[] encrypt(Cipher cipherIn, ByteBuffer b) { if(cipherIn==null && b.hasArray()) return b.array(); byte[] bb = new byte[Storage.PAGE_SIZE]; b.rewind(); b.get(bb,0,Storage.PAGE_SIZE); return encrypt(cipherIn,bb); } public static byte[] encrypt(Cipher cipherIn, byte[] b) { if (cipherIn == null) return b; try { return cipherIn.doFinal(b); } catch (Exception e) { throw new IOError(e); } } /** * Compares comparables. Default comparator for most of java types */ static final Comparator COMPARABLE_COMPARATOR = new Comparator() { public int compare(Comparable o1, Comparable o2) { return o1 == null && o2 != null ? -1 : (o1 != null && o2 == null ? 1 : o1.compareTo(o2)); } }; static String formatSpaceUsage(long size) { if (size < 1e4) return size + "B"; else if (size < 1e7) return "" + Math.round(1D * size / 1024D) + "KB"; else if (size < 1e10) return "" + Math.round(1D * size / 1e6) + "MB"; else return "" + Math.round(1D * size / 1e9) + "GB"; } static boolean allZeros(byte[] b) { for (int i = 0; i < b.length; i++) { if (b[i] != 0) return false; } return true; } static E max(E e1, E e2, Comparator comp){ if(e1 == null) return e2; if(e2 == null) return e1; if(comp == null) comp = COMPARABLE_COMPARATOR; return comp.compare(e1,e2)<0 ? e2:e1; } static E min(E e1, E e2, Comparator comp){ if(e1 == null) return e2; if(e2 == null) return e1; if(comp == null) comp = COMPARABLE_COMPARATOR; return comp.compare(e1,e2)>0 ? 
e2:e1; } static final Serializer NULL_SERIALIZER = new Serializer() { public void serialize(DataOutput out, Object obj) throws IOException { out.writeByte(11); } public Object deserialize(DataInput in) throws IOException, ClassNotFoundException { in.readByte(); return null; } }; } ================================================ FILE: src/main/java/org/apache/jdbm/packageXX.html ================================================

WARNING: incomplete and misleading doc!!!

This package contains the public API and an introduction.

JDBM intro

Key-Value databases have received a lot of attention recently, but their history is much older. GDBM (a predecessor of JDBM) started in 1970 and was called a 'pre-relational' database. JDBM has been under development since 2000. Version 1.0 has been in production since 2005 with only a few bugs reported. Version 2.0 adds some features on top of JDBM (most importantly java.util.Map views).

The goal of JDBM 2.0 is to provide simple and fast persistence. It is very simple to use, it has minimal overhead, and the standalone JAR takes only 130KB. It is an excellent choice for a Swing application or an Android phone. JDBM also handles huge datasets well and can be used for data processing (the author uses it to process astronomical data). The source code is not complicated; it is very readable and can also be used for teaching. On the other hand, it lacks some important features (concurrent scalability, multiple transactions, annotations, clustering...), which is the reason why it is so simple and small. For example, multiple transactions would introduce a new dimension of problems, such as concurrent updates, optimistic/pessimistic record locking, etc. JDBM does not try to replicate Voldemort, HBase or other more advanced Key-Value databases.

JDBM2 is

Not a SQL database
JDBM2 is more low-level. With this comes great power (speed, low resource usage, no ORM) but also great responsibility. You are responsible for data integrity, partitioning, typing, etc. An excellent embedded SQL database is H2 (in fact it is faster than JDBM2 in many cases).

Not an Object database
The fact that JDBM2 uses serialization may give you a false sense of security. It does not magically split a huge object graph into smaller pieces, nor does it handle duplicates. With JDBM you may easily end up with a single instance being persisted in several copies across the datastore. An object database would do this magic for you, as it traverses object graph references and makes sure there are no duplicates in the datastore. Have a look at NeoDatis or DB4o.

Not at enterprise level
The JDBM2 codebase is probably very good and largely bug-free, but it is a community project. You may easily end up without support. For something more enterprise-grade, have a look at Berkeley DB Java Edition from Oracle. BDB has more features, it is more robust, it has better documentation, a bigger overhead, and it comes with a price tag.

Not distributed
Key-Value databases are associated with distributed stores, map-reduce, etc. JDBM is not distributed; it runs on a single computer only. It does not even have a network interface and cannot act as a server. You are probably looking for Voldemort instead.

JDBM2 overview

JDBM2 has some helpful features that make it easier to use. They also bring it closer to SQL and help with data integrity checks and data queries.

Low level node store
This is a Key-Value database in the literal sense. The key is a record identifier number (recid) which points to a location in the file. Since the recid is a physical pointer, new key values must be assigned by the store (wherever free space is found). The value can be any object serializable to a byte[] array. The page store also provides transactions and a cache.
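Below is a purely conceptual sketch of that recid/value contract. The RecordStore interface and its method names are hypothetical illustrations of the RecordManager-style API this document refers to, not the exact JDBM classes.

import java.io.IOException;

// Hypothetical interface, for illustration only: the store chooses the recid,
// and the caller keeps it as the only handle to the stored value.
interface RecordStore {
    long insert(Object value) throws IOException;               // store assigns the recid
    <T> T fetch(long recid) throws IOException;                  // deserialize the stored value
    void update(long recid, Object value) throws IOException;
    void delete(long recid) throws IOException;
    void commit() throws IOException;
}

class RecordStoreDemo {
    static void demo(RecordStore store) throws IOException {
        long recid = store.insert("hello");                      // keep the recid, there is no other key
        String value = store.fetch(recid);
        store.update(recid, value + " world");
        store.commit();
    }
}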

Named objects
A number as an identifier is not very practical, so there is a table that translates Strings to recids. This is the recommended approach for persisting singletons.
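The name table is typically used as in the sketch below; getNamedObject/setNamedObject are assumptions modelled on the JDBM2-style API mentioned in this document (with 0 meaning the name has not been assigned yet), not guaranteed signatures.

import java.io.IOException;

// Hypothetical name-table interface, for illustration only.
interface NamedRecordStore {
    long insert(Object value) throws IOException;
    long getNamedObject(String name) throws IOException;         // 0 = name not assigned yet
    void setNamedObject(String name, long recid) throws IOException;
}

class NamedObjectDemo {
    // Look up a singleton by name, creating and registering it on first use.
    static long lookupOrCreate(NamedRecordStore store, String name, Object initial)
            throws IOException {
        long recid = store.getNamedObject(name);
        if (recid == 0) {
            recid = store.insert(initial);
            store.setNamedObject(name, recid);                    // remember the recid under the name
        }
        return recid;
    }
}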

Primary maps
{@link jdbm.PrimaryTreeMap} and {@link jdbm.PrimaryHashMap} implement the java.util.Map interface from Java Collections, but they use the node store for persistence. So you can create a HashMap with billions of items and worry only about the commits.
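A minimal sketch of creating a persistent map, modelled on the usage visible in this repository's tests (the DBStore constructor flags are fileName, readonly, transactionsDisabled, lockingDisabled; the file path and the generic signature of createTreeMap are assumptions here):

package org.apache.jdbm;                 // same package as the tests; DBStore is not public API

import java.util.SortedMap;

class PrimaryMapExample {
    public static void main(String[] args) throws Exception {
        DB db = new DBStore("/tmp/example-db", false, false, false);
        SortedMap<Long, String> people = db.createTreeMap("people");
        people.put(1L, "Alice");
        people.put(2L, "Bob");
        db.commit();                     // make the inserts durable
        db.close();
    }
}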

Secondary maps
Secondary maps (indexes) provide side information and associations for the primary map. For example, if there is a Person class persisted in the primary map, the secondary maps can provide fast lookup by name, address, age... The secondary maps are 'views' of the primary map and are read-only. They are updated automatically by the primary map.
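Conceptually, a secondary map is just a derived index from a secondary key back to the primary key. The manual sketch below only illustrates that relationship; in JDBM the secondary map is maintained automatically and is read-only. The Person class is hypothetical.

import java.util.Map;
import java.util.TreeMap;

class Person {                           // hypothetical value type
    final String name;
    Person(String name) { this.name = name; }
}

class SecondaryIndexIdea {
    // Build a "lookup by name" index over a primary map keyed by recid.
    static TreeMap<String, Long> indexByName(Map<Long, Person> primary) {
        TreeMap<String, Long> byName = new TreeMap<String, Long>();
        for (Map.Entry<Long, Person> e : primary.entrySet()) {
            byName.put(e.getValue().name, e.getKey());   // secondary key -> primary key
        }
        return byName;
    }
}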

Cache
JDBM has an object instance cache. This reduces serialization time and disk IO. By default JDBM uses a SoftReference cache. If the JVM has less than 50MB of heap space available, a fixed-size MRU (Most Recently Used) cache is used instead.

Transactions
JDBM provides transactions with commit and rollback. The transaction mechanism is safe and tested (in use for the last 5 years). JDBM allows only a single concurrent transaction, so there are no problems with concurrent updates and locking.
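A short sketch of commit and rollback, assuming a DB opened as in the primary-map example above; getTreeMap, commit and rollback follow the DB methods exercised by this repository's tests:

import java.util.SortedMap;

class TransactionDemo {
    static void demo(DB db) throws Exception {
        SortedMap<Long, String> people = db.getTreeMap("people");
        people.put(3L, "Carol");
        db.commit();                     // everything up to here is durable
        people.put(4L, "Dave");
        db.rollback();                   // discards the uncommitted put of key 4
    }
}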

10 things to keep in mind

  • Uncommitted data are stored in memory, so if you get an OutOfMemoryException you have to commit more frequently (see the sketch after this list).
  • Keys and values are stored as part of the index nodes. They are instantiated each time the index is searched. If you have larger values (>512 bytes), these may hurt performance and cause an OutOfMemoryException.
  • If you run into performance problems, use a profiler rather than asking about it over the internet.
  • JDBM caches returned object instances. If you modify an object (like setting a new name on a person), the RecordManager may return the object with this modification next time.
  • Iteration over Maps is not guaranteed to be consistent if there are concurrent changes (for example, adding a new entry while iterating). There is no fail-fast policy yet, so all iterations over Maps should be synchronized on the RecordManager.
  • More memory means better performance; use -Xmx000m generously. JDBM has a good SoftReference cache.
  • The SoftReference cache may be holding memory back from other tasks. The memory is released automatically, but it may take longer than you expect. Consider clearing the cache manually with RecordManager.clearCache() before starting a new type of task.
  • It is safe not to close the db before exiting, but if you do that, there will be a long cleanup upon the next start.
  • JDBM may have problems reclaiming free space after many records are deleted/updated. You may want to run RecordManager.defrag() from time to time.
  • A Key-Value db does not support N-M relations easily. It takes a lot of care to handle them correctly.
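The sketch below combines two of the tips above: commit periodically during a bulk load so uncommitted data does not pile up in memory, and clear the instance cache before switching to a different kind of work. The batch size of 10000 is an arbitrary example; commit and clearCache are the DB methods used in this repository's tests.

import java.util.SortedMap;

class BulkLoadDemo {
    static void bulkLoad(DB db, SortedMap<Long, String> map, long count) throws Exception {
        for (long i = 0; i < count; i++) {
            map.put(i, "value" + i);
            if (i % 10000 == 0) {
                db.commit();             // bounds the amount of uncommitted in-memory data
            }
        }
        db.commit();
        db.clearCache();                 // release cached instances before the next task
    }
}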

Core classes for managing persistent objects and processing transactions.

Memory allocation

This document describes the memory allocation structures and algorithms used by jdbm. It is based on a thread in the jdbm-developers mailing list.

  • A block is a fixed length of bytes. Also known as a node.
  • A row is a variable length of bytes. Also known as a record.
  • A slot is a fixed length entry in a given block/node.
  • A node list is a linked list of pages. The head and tail of each node list are maintained in the file header.
Jdbm knows about a few node lists which are pre-defined in Magic, e.g., Magic.USED_PAGE. The FREE, USED, TRANSLATION, FREELOGIDS, and FREEPHYSIDS node lists are used by the jdbm memory allocation policy and are described below.

The translation list consists of a bunch of slots that can be available (free) or unavailable (allocated). If a slot is available, then it contains junk data (it is available to map the logical row id associated with that slot to some physical row id). If it is unavailable, then it contains the block id and offset of the header of a valid (non-deleted) record. "Available" for the translation list is marked by a zero block id for that slot.

The free logical row id list consists of a set of pages that contain slots. Each slot is either available (free) or unavailable (allocated). If it is unavailable, then it contains a reference to the location of the available slot in the translation list. If it is available, then it contains junk data. "Available" slots are marked by a zero block id. A count is maintained of the #of available slots (free row ids) on the node.

As you free a logical row id, you change its slot in the translation list from unavailable to available, and then *add* entries to the free logical row id list. Adding entries to the free logical row id list is done by finding an available slot in that list and replacing the junk data in that slot with the location of the now available slot in the translation list. A count is maintained of the #of available slots (free row ids) on the node.
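A purely conceptual sketch of the two structures just described is shown below (this is not the on-disk layout; the free logical row id list is simplified to an in-memory stack of slot references):

import java.util.Deque;

// A translation-list slot: blockId == 0 marks it available, otherwise it holds the
// block id and offset of the record header it maps to.
class TranslationSlot {
    long blockId;
    short offset;
    boolean isAvailable() { return blockId == 0; }
}

class FreeLogicalRowIdSketch {
    // Freeing a logical row id: clear its translation slot, then *add* an entry
    // (a reference to the now-available slot) to the free logical row id list.
    static void freeLogicalRowId(TranslationSlot slot, Deque<TranslationSlot> freeLogicalRowIds) {
        slot.blockId = 0;
        slot.offset = 0;
        freeLogicalRowIds.push(slot);
    }
}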

Whew... now we've freed a logical row id. But what about the physical row id?

Well, the free physical row id list consists of a set of pages that contain slots. Each slot is either available (free) or unavailable (allocated). If it is unavailable, then it contains a reference to the location of the newly freed row's header in the data node. If it is available, then it contains junk data. "Available" slots are marked by a zero block id. A count is maintained of the #of available slots (free row ids) on the node. (Sound familiar?)

As you free a physical row id, you change its header in the data node from in-use to free (by zeroing the size field of the record header), and then *add* an entry to the free physical row id list. Adding entries to the free physical row id list consists of finding an available slot and replacing the junk data in that slot with the location of the newly freed row's header in the data node.

The translation list is used for translating in-use logical row ids to in-use physical row ids. When a physical row id is freed, it is removed from the translation list and added to the free physical row id list.

This allows a complete decoupling of the logical row id from the physical row id, which makes it super easy to do some of the fiddling I'm talking about, like coalescing and splitting records.

If you want to get a list of the free records, just enumerate the unavailable entries in the free physical row id list. You don't even need to look up the record header because the length of the record is also stored in the free physical row id list. As you enumerate the list, be sure to not include slots that are available (in the current incarnation of jdbm, I believe the available length is set to 0 to indicate available - we'll be changing that some time soon here, I'm sure).

================================================ FILE: src/test/java/org/apache/jdbm/BTreeBench.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.IOException; import java.util.Enumeration; import java.util.Hashtable; /** * Random insertion/removal test for B+Tree data structure. * * @author Alex Boisvert */ public class BTreeBench extends TestCaseWithTestFile { DBAbstract db; /** * Test w/o compression or specialized key or value serializers. * * @throws IOException */ public void test_001() throws IOException { db = newDBCache(); BTree tree = BTree.createInstance(db); doTest(db, tree, 5001); db.close(); } public static void doTest(DB db, BTree tree, int ITERATIONS) throws IOException { long beginTime = System.currentTimeMillis(); Hashtable hash = new Hashtable(); for (int i = 0; i < ITERATIONS; i++) { Long random = new Long(random(0, 64000)); if ((i % 5000) == 0) { long elapsed = System.currentTimeMillis() - beginTime; System.out.println("Iterations=" + i + " Objects=" + tree._entries + ", elapsed=" + elapsed + "ms"); db.commit(); } if (hash.get(random) == null) { //System.out.println( "Insert " + random ); hash.put(random, random); tree.insert(random, random, false); } else { //System.out.println( "Remove " + random ); hash.remove(random); Object removed = (Object) tree.remove(random); if ((removed == null) || (!removed.equals(random))) { throw new IllegalStateException("Remove expected " + random + " got " + removed); } } // tree.assertOrdering(); compare(tree, hash); } } static long random(int min, int max) { return Math.round(Math.random() * (max - min)) + min; } static void compare(BTree tree, Hashtable hash) throws IOException { boolean failed = false; Enumeration enumeration; if (tree._entries != hash.size()) { throw new IllegalStateException("Tree size " + tree._entries + " Hash size " + hash.size()); } enumeration = hash.keys(); while (enumeration.hasMoreElements()) { Long key = enumeration.nextElement(); Long hashValue = hash.get(key); Long treeValue = tree.get(key); if (!hashValue.equals(treeValue)) { System.out.println("Compare expected " + hashValue + " got " + treeValue); failed = true; } } if (failed) { throw new IllegalStateException("Compare failed"); } } } ================================================ FILE: src/test/java/org/apache/jdbm/BTreeKeyCompressionTest.java ================================================ package org.apache.jdbm; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.SortedMap; public class BTreeKeyCompressionTest extends TestCaseWithTestFile { static final long size = (long) 1e5; public void testExpand() throws IOException { long init = Long.MAX_VALUE - size * 2; String file = newTestFile(); DB db = new 
DBStore(file, false, false,false); SortedMap map = db.createTreeMap("aa"); for (long i = init; i < init + size; i++) { map.put(i, ""); } db.commit(); db.defrag(true); db.close(); long fileSize = new File(file + ".dbr.0").length() / 1024; System.out.println("file size: " + fileSize); assertTrue("file is too big, compression failed", fileSize < 1000); } public void testCornersLimitsLong() throws IOException { DB db = newDBCache(); SortedMap map = db.createTreeMap("aa"); ArrayList ll = new ArrayList(); for (Long i = Long.MIN_VALUE; i < Long.MIN_VALUE + 1000; i++) { map.put(i, ""); ll.add(i); } for (Long i = -1000l; i < 1000; i++) { map.put(i, ""); ll.add(i); } for (Long i = Long.MAX_VALUE - 1000; i <= Long.MAX_VALUE && i > 0; i++) { map.put(i, ""); ll.add(i); } db.commit(); db.clearCache(); for (Long i : ll) { assertTrue("failed for " + i, map.containsKey(i)); } assertTrue(!map.containsKey(Long.valueOf(Long.MIN_VALUE + 1000))); assertTrue(!map.containsKey(Long.valueOf(Long.MIN_VALUE + 1001))); assertTrue(!map.containsKey(Long.valueOf(-1001L))); assertTrue(!map.containsKey(Long.valueOf(-1002L))); assertTrue(!map.containsKey(Long.valueOf(1001L))); assertTrue(!map.containsKey(Long.valueOf(1002L))); assertTrue(!map.containsKey(Long.valueOf(Long.MAX_VALUE - 1001))); assertTrue(!map.containsKey(Long.valueOf(Long.MAX_VALUE - 1002))); db.close(); } public void testCornersLimitsInt() throws IOException { DB db = newDBCache(); SortedMap map = db.createTreeMap("aa"); ArrayList ll = new ArrayList(); for (Integer i = Integer.MIN_VALUE; i < Integer.MIN_VALUE + 1000; i++) { map.put(new Integer(i), ""); ll.add(new Integer(i)); } for (Integer i = -1000; i < 1000; i++) { map.put(i, ""); ll.add(i); } for (Integer i = Integer.MAX_VALUE - 1000; i <= Integer.MAX_VALUE && i > 0; i++) { map.put(i, ""); ll.add(i); } db.commit(); db.clearCache(); for (Integer i : ll) { assertTrue("failed for " + i, map.containsKey(i)); } assertTrue(!map.containsKey(Integer.valueOf(Integer.MIN_VALUE + 1000))); assertTrue(!map.containsKey(Integer.valueOf(Integer.MIN_VALUE + 1001))); assertTrue(!map.containsKey(Integer.valueOf(-1001))); assertTrue(!map.containsKey(Integer.valueOf(-1002))); assertTrue(!map.containsKey(Integer.valueOf(1001))); assertTrue(!map.containsKey(Integer.valueOf(1002))); assertTrue(!map.containsKey(Integer.valueOf(Integer.MAX_VALUE - 1001))); assertTrue(!map.containsKey(Integer.valueOf(Integer.MAX_VALUE - 1002))); db.close(); } public void testStrings() throws IOException { long init = Long.MAX_VALUE - size * 2; String file = newTestFile(); DB db = new DBStore(file, false, false,false); SortedMap map = db.createTreeMap("aa"); for (long i = init; i < init + size / 10; i++) { map.put("aaaaa" + i, ""); } db.commit(); db.defrag(true); db.close(); db = new DBStore(file, false, false,false); map = db.getTreeMap("aa"); for (long i = init; i < init + size / 10; i++) { assertTrue(map.containsKey("aaaaa" + i)); } long fileSize = new File(file + ".dbr.0").length() / 1024; System.out.println("file size with Strings: " + fileSize); assertTrue("file is too big, compression failed", fileSize < 120); } } ================================================ FILE: src/test/java/org/apache/jdbm/BTreeLeadingValuePackTest.java ================================================ package org.apache.jdbm; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; import java.util.Arrays; import java.util.Random; import 
junit.framework.TestCase; public class BTreeLeadingValuePackTest extends TestCase { public static class ByteArraySource { byte[] last = new byte[0]; Random r; public ByteArraySource(long seed) { r = new Random(seed); r.nextBytes(last); } public byte[] getBytesWithCommonPrefix(int len, int common) { if (common > last.length) common = last.length; if (common > len) common = len; byte[] out = new byte[len]; System.arraycopy(last, 0, out, 0, common); byte[] xtra = new byte[len - common]; r.nextBytes(xtra); System.arraycopy(xtra, 0, out, common, xtra.length); last = out; return out; } } private void doCompressUncompressTestFor(byte[][] groups) throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream dos = new DataOutputStream(baos); //compress for (int i = 0; i < groups.length; i++) { BTreeNode.leadingValuePackWrite(dos, groups[i], i > 0 ? groups[i - 1] : null, 0); } byte[] results = baos.toByteArray(); ByteArrayInputStream bais = new ByteArrayInputStream(results); DataInputStream dis = new DataInputStream(bais); byte[] previous = null; for (int i = 0; i < groups.length; i++) { previous = BTreeNode.leadingValuePackRead(dis, previous, 0); assertTrue(Arrays.equals(groups[i], previous)); } } private byte[][] getIncrementingGroups(int groupCount, long seed, int lenInit, int comInit, int lenIncr, int comIncr) { ByteArraySource bap = new ByteArraySource(seed); byte[][] groups = new byte[groupCount][]; for (int i = 0; i < groupCount; i++) { groups[i] = bap.getBytesWithCommonPrefix(lenInit, comInit); lenInit += lenIncr; comInit += comIncr; } return groups; } public void testCompDecompEqualLenEqualCommon() throws IOException { byte[][] groups = getIncrementingGroups( 5, // number of groups 1000, // seed 50, // starting byte array length 5, // starting common bytes 0, // length increment 0 // common bytes increment ); doCompressUncompressTestFor(groups); } public void testCompDecompEqualLenIncrCommon() throws IOException { byte[][] groups = getIncrementingGroups( 5, // number of groups 1000, // seed 50, // starting byte array length 5, // starting common bytes 0, // length increment 2 // common bytes increment ); doCompressUncompressTestFor(groups); } public void testCompDecompEqualLenDecrCommon() throws IOException { byte[][] groups = getIncrementingGroups( 5, // number of groups 1000, // seed 50, // starting byte array length 40, // starting common bytes 0, // length increment -2 // common bytes increment ); doCompressUncompressTestFor(groups); } public void testCompDecompIncrLenEqualCommon() throws IOException { byte[][] groups = getIncrementingGroups( 5, // number of groups 1000, // seed 30, // starting byte array length 25, // starting common bytes 1, // length increment 0 // common bytes increment ); doCompressUncompressTestFor(groups); } public void testCompDecompDecrLenEqualCommon() throws IOException { byte[][] groups = getIncrementingGroups( 5, // number of groups 1000, // seed 50, // starting byte array length 25, // starting common bytes -1, // length increment 0 // common bytes increment ); doCompressUncompressTestFor(groups); } public void testCompDecompNoCommon() throws IOException { byte[][] groups = getIncrementingGroups( 5, // number of groups 1000, // seed 50, // starting byte array length 0, // starting common bytes -1, // length increment 0 // common bytes increment ); doCompressUncompressTestFor(groups); } public void testCompDecompNullGroups() throws IOException { byte[][] groups = getIncrementingGroups( 5, // number of groups 1000, // seed 
50, // starting byte array length 25, // starting common bytes -1, // length increment 0 // common bytes increment ); groups[2] = null; groups[4] = null; doCompressUncompressTestFor(groups); } } ================================================ FILE: src/test/java/org/apache/jdbm/BTreeMapNavigable2Test.java ================================================ package org.apache.jdbm; import java.util.*; public class BTreeMapNavigable2Test extends TestCaseWithTestFile { static NavigableMap map; public void setUp() throws Exception { map = newDBNoCache().createTreeMap("test"); map.put(1, "one"); map.put(2, "two"); map.put(3, "three"); map.put(4, "four"); map.put(7, "seven"); map.put(8, "eight"); map.put(9, "nine"); map.put(10, "ten"); } public void testSize() { int i = 8; assertEquals(map.size(), i); while (!map.isEmpty()) { map.remove(map.firstKey()); assertEquals(map.size(), --i); } } public void testContainsKey() { assertTrue(map.containsKey(1)); assertTrue(map.containsKey(2)); assertTrue(map.containsKey(3)); assertTrue(map.containsKey(4)); assertFalse(map.containsKey(5)); assertFalse(map.containsKey(6)); assertTrue(map.containsKey(7)); assertTrue(map.containsKey(8)); assertTrue(map.containsKey(9)); assertTrue(map.containsKey(10)); assertFalse(map.containsKey(999)); assertFalse(map.containsKey(-1)); } public void testContainsValue() { assertTrue(map.containsValue("one")); assertTrue(map.containsValue("two")); assertTrue(map.containsValue("three")); assertTrue(map.containsValue("four")); assertFalse(map.containsValue("five")); assertFalse(map.containsValue("six")); assertTrue(map.containsValue("seven")); assertTrue(map.containsValue("eight")); assertTrue(map.containsValue("nine")); assertTrue(map.containsValue("ten")); assertFalse(map.containsValue("aaaa")); } public void testPut() { assertFalse(map.containsKey(40)); assertFalse(map.containsValue("forty")); map.put(40, "forty"); assertTrue(map.containsKey(40)); assertTrue(map.containsValue("forty")); } public void testLowerEntry() { AbstractMap.Entry e = map.lowerEntry(4); assertEquals(e.getKey(), (Integer)3); } public void testLowerKey() { Integer key = map.lowerKey(4); assertEquals(key, (Integer)3); } public void testFloorEntry() { AbstractMap.Entry e = map.floorEntry(6); assertEquals(e.getKey(), (Integer)4); e = map.floorEntry(7); assertEquals(e.getKey(), (Integer)7); } public void testFloorKey() { Integer key = map.floorKey(6); assertEquals(key, (Integer)4); key = map.floorKey(7); assertEquals(key, (Integer)7); } public void testCeilingEntry() { AbstractMap.Entry e = map.ceilingEntry(6); assertEquals(e.getKey(), (Integer)7); e = map.ceilingEntry(7); assertEquals(e.getKey(), (Integer)7); } public void testCeilingKey() { Integer key = map.ceilingKey(6); assertEquals(key, (Integer)7); key = map.ceilingKey(7); assertEquals(key, (Integer)7); } public void testHigherEntry() { AbstractMap.Entry e = map.higherEntry(4); assertEquals(e.getKey(), (Integer)7); e = map.higherEntry(7); assertEquals(e.getKey(), (Integer)8); } public void testHigherKey() { Integer key = map.higherKey(4); assertEquals(key, (Integer)7); key = map.higherKey(7); assertEquals(key, (Integer)8); } public void testFirstEntry() { assertEquals( map.firstEntry().getKey(), (Integer) 1); } public void testLastEntry() { assertEquals( map.lastEntry().getKey(), (Integer) 10); } public void testPollFirstEntry() { int size0 = map.size(); AbstractMap.Entry e = map.pollFirstEntry(); int size1 = map.size(); assertEquals(size0-1, size1); assertNull(map.get(1)); assertEquals(e.getKey(), 
(Integer)1); assertEquals(e.getValue(), "one"); } public void testPollLastEntry() { int size0 = map.size(); AbstractMap.Entry e = map.pollLastEntry(); int size1 = map.size(); assertEquals(size0-1, size1); assertNull(map.get(10)); assertEquals(e.getKey(), (Integer)10); assertEquals(e.getValue(), "ten"); } // // TODO implement this // public void testDescendingMap() // { // NavigableMap desMap = map.descendingMap(); // Set> entrySet1 = map.entrySet(); // Set> entrySet2 = desMap.entrySet(); // AbstractMap.Entry[] arr1 = entrySet1.toArray(new AbstractMap.Entry[0]); // AbstractMap.Entry[] arr2 = entrySet2.toArray(new AbstractMap.Entry[0]); // // int size = arr1.length; // assertEquals(arr1.length, arr2.length); // for (int i = 0; i < arr1.length; i++) // { // assertEquals(arr1[i], arr2[size-1-i]); // } // } public void testNavigableKeySet() { int size0 = map.size(); NavigableSet keySet = map.navigableKeySet(); int size1 = keySet.size(); assertEquals(size0, size1); keySet.remove(2); size0 = map.size(); size1 = keySet.size(); assertEquals(size0, size1); assertNull(map.get(2)); } // TODO implement this // // public void testDescendingKeySet() // { // Set keySet1 = map.keySet(); // Set keySet2 = map.descendingKeySet(); // // Integer[] arr1 = keySet1.toArray(new Integer[0]); // Integer[] arr2 = keySet2.toArray(new Integer[0]); // int size = arr1.length; // assertEquals(arr1.length, arr2.length); // for (int i = 0; i < size; i++) // { // assertEquals(arr1[i],arr2[size-1-i]); // } // } public void testSubMap() { SortedMap subMap = map.subMap(3, 8); assertNotNull(subMap.get(3)); assertEquals(subMap.get(3), "three"); assertEquals(subMap.get(4), "four"); assertNull(subMap.get(5)); assertNull(subMap.get(6)); assertEquals(subMap.get(7), "seven"); assertNull(subMap.get(8)); assertNull(subMap.get(2)); assertNull(subMap.get(9)); try { subMap.put(11,"eleven"); fail("Inserted entry outside of submap range"); } catch (IllegalArgumentException e) { assertNull(subMap.get(11)); } } public void testSubMap2() { NavigableMap subMap = map.subMap(3,true,8,false); assertNotNull(subMap.get(3)); assertEquals(subMap.get(3), "three"); assertEquals(subMap.get(4), "four"); assertNull(subMap.get(5)); assertNull(subMap.get(6)); assertEquals(subMap.get(7), "seven"); assertNull(subMap.get(8)); assertNull(subMap.get(2)); assertNull(subMap.get(9)); try { subMap.put(11,"eleven"); fail("Inserted entry outside of submap range"); } catch (IllegalArgumentException e) { assertNull(subMap.get(11)); } } public void testSubMap3() { NavigableMap subMap = map.subMap(2, false, 8, false); assertNotNull(subMap.get(3)); assertEquals(subMap.get(3), "three"); assertEquals(subMap.get(4), "four"); assertNull(subMap.get(5)); assertNull(subMap.get(6)); assertEquals(subMap.get(7), "seven"); assertNull(subMap.get(8)); assertNull(subMap.get(2)); assertNull(subMap.get(9)); try { subMap.put(11,"eleven"); fail("Inserted entry outside of submap range"); } catch (IllegalArgumentException e) { assertNull(subMap.get(11)); } } public void testSubMap4() { NavigableMap subMap = map.subMap(3, true, 7, true); assertNotNull(subMap.get(3)); assertEquals(subMap.get(3), "three"); assertEquals(subMap.get(4), "four"); assertNull(subMap.get(5)); assertNull(subMap.get(6)); assertEquals(subMap.get(7), "seven"); assertNull(subMap.get(8)); assertNull(subMap.get(2)); assertNull(subMap.get(9)); try { subMap.put(11,"eleven"); fail("Inserted entry outside of submap range"); } catch (IllegalArgumentException e) { assertNull(subMap.get(11)); } } public void testHeadMap() { SortedMap 
subMap = map.headMap(5); assertEquals(subMap.size(), 4); assertNull(subMap.get(5)); assertEquals(subMap.get(1), "one"); try { subMap.put(5, "five"); fail("Inseted data out of bounds of submap."); } catch (IllegalArgumentException e) { assertNull(subMap.get(5)); } } public void testHeadMap2() { NavigableMap subMap = map.headMap(5, false); assertEquals(subMap.size(), 4); assertNull(subMap.get(5)); assertEquals(subMap.get(1), "one"); try { subMap.put(5, "five"); fail("Inseted data out of bounds of submap."); } catch (IllegalArgumentException e) { assertNull(subMap.get(5)); } } public void testHeadMap3() { NavigableMap subMap = map.headMap(5, true); assertEquals(subMap.size(), 4); assertNull(subMap.get(5)); assertEquals(subMap.get(1), "one"); try { subMap.put(5, "five"); assertEquals(subMap.get(5), "five"); } catch (IllegalArgumentException e) { fail("It was not possible to insert a legal value in a submap."); } } public void testHeadMap4() { NavigableMap subMap = map.headMap(8, true); assertEquals(subMap.size(), 6); assertEquals(subMap.get(8), "eight"); assertEquals(subMap.get(1), "one"); try { subMap.put(5, "five"); assertEquals(subMap.get(5), "five"); } catch (IllegalArgumentException e) { fail("It was not possible to insert a legal value in a submap."); } } public void testTailMap() { SortedMap subMap = map.tailMap(5); assertEquals(subMap.size(), 4); assertEquals(subMap.firstKey(), (Integer)7); assertEquals(subMap.lastKey(), (Integer)10); } public void testTailMap2() { SortedMap subMap = map.tailMap(7); assertEquals(subMap.size(), 4); assertEquals(subMap.firstKey(), (Integer)7); assertEquals(subMap.lastKey(), (Integer)10); } public void testTailMap3() { NavigableMap subMap = map.tailMap(7, false); assertEquals(subMap.size(), 3); assertEquals(subMap.firstKey(), (Integer)8); assertEquals(subMap.lastKey(), (Integer)10); } public void testTailMap4() { NavigableMap subMap = map.tailMap(7, true); assertEquals(subMap.size(), 4); assertEquals(subMap.firstKey(), (Integer)7); assertEquals(subMap.lastKey(), (Integer)10); } public void testIsEmpty() { assertFalse(map.isEmpty()); map.clear(); assertTrue(map.isEmpty()); } public void testClearSubmap() { NavigableMap subMap = map.subMap(7, true, 9, true); subMap.clear(); assertEquals(subMap.size(), 0); assertTrue(map.size()==5); assertNull(map.get(7)); assertNull(map.get(8)); assertNull(map.get(9)); } public void testConcurrentModification() { Set> entrySet = map.entrySet(); assertTrue(entrySet.size() > 0); try { for (AbstractMap.Entry e : entrySet) entrySet.remove(e); fail("No concurrentModificationException was thrown"); } catch (ConcurrentModificationException ex){} } } ================================================ FILE: src/test/java/org/apache/jdbm/BTreeMapNavigableSubMapExclusiveTest.java ================================================ package org.apache.jdbm; public class BTreeMapNavigableSubMapExclusiveTest extends BTreeMapNavigable2Test{ public void setUp() throws Exception { super.setUp(); map.put(-1,"-one"); map.put(0,"zero"); map.put(11,"eleven"); map.put(12,"twelve"); map = map.subMap(0,false,11,false); } public void testPut(){ //this test is not run on submaps } } ================================================ FILE: src/test/java/org/apache/jdbm/BTreeMapNavigableSubMapInclusiveTest.java ================================================ package org.apache.jdbm; public class BTreeMapNavigableSubMapInclusiveTest extends BTreeMapNavigable2Test{ public void setUp() throws Exception { super.setUp(); map.put(0,"zero"); map.put(11,"eleven"); 
map = map.subMap(1,true,10,true); } public void testPut(){ //this test is not run on submaps } } ================================================ FILE: src/test/java/org/apache/jdbm/BTreeMapNavigableTest.java ================================================ /* * Copyright 2012 Luc Peuvrier * All rights reserved. * * This file is a part of JOAFIP. * * JOAFIP is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License. * * Licensed under the GNU LESSER GENERAL PUBLIC LICENSE * Licensed under the LGPL License, Version 3, 29 June 2007 (the "LGPL License"); * you may not use this file except in compliance with the "LGPL License" extended with here below additional permissions. * You may obtain a copy of the "LGPL License" at * * http://www.gnu.org/licenses/lgpl.html * * Additional permissions extensions for this file: * * Redistribution and use in source and binary forms, with or without modification, * are permitted under the the Apache License, Version 2.0 (the "Apache License") instead of the "LGPL License" * and if following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * You may obtain a copy of the "Apache License" at * * http://www.apache.org/licenses/LICENSE-2.0 * * JOAFIP is distributed in the hope that it will be useful, but * unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.apache.jdbm; import java.io.IOException; import java.util.NavigableMap; import java.util.NavigableSet; import java.util.SortedMap; import java.util.Map.Entry; /** * to test {@link NavigableMap} implementation * * @author luc peuvrier * */ public class BTreeMapNavigableTest extends TestCaseWithTestFile { private static final String MUST_NOT_CONTAINS_KD = "must not contains 'kd'"; private static final String MUST_NOT_CONTAINS_KA = "must not contains 'ka'"; private static final String BAD_FIRST_ENTRY_KEY = "bad first entry key"; private static final String MUST_NOT_BE_EMPTY = "must not be empty"; private static final String BAD_SIZE = "bad size"; private static final String MUST_CONTAINS_KC = "must contains 'kc'"; private static final String MUST_CONTAINS_KB = "must contains 'kb'"; private static final String MUST_CONTAINS_KA = "must contains 'ka'"; private NavigableMap navigableMap; public void setUp() throws IOException { navigableMap = newDBCache().createTreeMap("test"); } public void testLowerEntry() { navigableMap.put("ka", "xx"); navigableMap.put("kb", "aa"); navigableMap.put("kc", "zz"); final Entry lowerEntry = navigableMap.lowerEntry("kb"); assertEquals("bad lower entry value", "xx", lowerEntry.getValue()); assertEquals("bad lower entry key", "ka", lowerEntry.getKey()); } public void testLowerKey() { navigableMap.put("ka", "xx"); navigableMap.put("kb", "aa"); navigableMap.put("kc", "zz"); assertEquals("bad lower key", "ka", navigableMap.lowerKey("kb")); } public void testFloorEntry() { navigableMap.put("ka", "xx"); navigableMap.put("kc", "aa"); navigableMap.put("kd", "zz"); Entry floorEntry = navigableMap.floorEntry("ka"); assertEquals("bad floor entry value", "xx", floorEntry.getValue()); assertEquals("bad floor entry key", "ka", floorEntry.getKey()); floorEntry = navigableMap.floorEntry("kb"); assertEquals("bad floor entry value", "xx", floorEntry.getValue()); assertEquals("bad floor entry key", "ka", floorEntry.getKey()); } public void testFloorKey() { navigableMap.put("ka", "xx"); navigableMap.put("kc", "aa"); navigableMap.put("kd", "zz"); assertEquals("bad floor key", "ka", navigableMap.floorKey("ka")); assertEquals("bad floor key", "ka", navigableMap.floorKey("kb")); } public void testCeilingEntry() { navigableMap.put("ka", "xx"); navigableMap.put("kb", "aa"); navigableMap.put("kd", "zz"); Entry ceilingEntry = navigableMap.ceilingEntry("kd"); assertEquals("bad ceiling entry value", "zz", ceilingEntry.getValue()); assertEquals("bad ceiling entry key", "kd", ceilingEntry.getKey()); ceilingEntry = navigableMap.ceilingEntry("kc"); assertEquals("bad ceiling entry value", "zz", ceilingEntry.getValue()); assertEquals("bad ceiling entry key", "kd", ceilingEntry.getKey()); } public void testCeilingKey() { navigableMap.put("ka", "xx"); navigableMap.put("kb", "aa"); navigableMap.put("kd", "zz"); assertEquals("bad ceiling key", "kd", navigableMap.ceilingKey("kd")); assertEquals("bad ceiling key", "kd", navigableMap.ceilingKey("kc")); } public void testHigherEntry() { navigableMap.put("ka", "xx"); navigableMap.put("kb", "aa"); navigableMap.put("kc", "zz"); final Entry higherEntry = navigableMap .higherEntry("kb"); assertEquals("bad higher entry value", "zz", higherEntry.getValue()); assertEquals("bad higher entry key", "kc", higherEntry.getKey()); } public void testHigherKey() { navigableMap.put("ka", "xx"); navigableMap.put("kb", "aa"); navigableMap.put("kc", "zz"); assertEquals("bad higher key", "kc", navigableMap.higherKey("kb")); } public void testFirstEntry() { 
navigableMap.put("ka", "xx"); navigableMap.put("kb", "aa"); navigableMap.put("kc", "zz"); final Entry firstEntry = navigableMap.firstEntry(); assertEquals("bad first entry value", "xx", firstEntry.getValue()); assertEquals(BAD_FIRST_ENTRY_KEY, "ka", firstEntry.getKey()); } public void testLastEntry() { navigableMap.put("ka", "xx"); navigableMap.put("kb", "aa"); navigableMap.put("kc", "zz"); final Entry lastEntry = navigableMap.lastEntry(); assertEquals("bad last entry value", "zz", lastEntry.getValue()); assertEquals("bad last entry key", "kc", lastEntry.getKey()); } public void testPollFirstEntry() { assertNull("must not have first entry", navigableMap.pollFirstEntry()); navigableMap.put("ka", "xx"); navigableMap.put("kb", "aa"); navigableMap.put("kc", "zz"); assertEquals("must have 3 entries", 3, navigableMap.size()); final Entry firstEntry = navigableMap.pollFirstEntry(); assertNotNull("must have first entry", firstEntry); assertEquals("bad first entry value", "xx", firstEntry.getValue()); assertEquals(BAD_FIRST_ENTRY_KEY, "ka", firstEntry.getKey()); assertEquals("must have 2 entries", 2, navigableMap.size()); } public void testPollLastEntry() { assertNull("must not have last entry", navigableMap.pollLastEntry()); navigableMap.put("ka", "xx"); navigableMap.put("kb", "aa"); navigableMap.put("kc", "zz"); assertEquals("must have 3 entries", 3, navigableMap.size()); final Entry lastEntry = navigableMap.pollLastEntry(); assertNotNull("must have last entry", lastEntry); assertEquals("bad last entry value", "zz", lastEntry.getValue()); assertEquals("bad last entry key", "kc", lastEntry.getKey()); assertEquals("must have 2 entries", 2, navigableMap.size()); } // TODO implement this // // public void testDescendingMap() { // navigableMap.put("ka", "xx"); // navigableMap.put("kb", "aa"); // navigableMap.put("kc", "zz"); // final NavigableMap descendingMap = navigableMap // .descendingMap(); // // assertEquals(BAD_SIZE, 3, descendingMap.size()); // assertFalse(MUST_NOT_BE_EMPTY, descendingMap.isEmpty()); // // final Entry firstEntry = descendingMap.firstEntry(); // assertEquals("bad first entry value", "zz", firstEntry.getValue()); // assertEquals(BAD_FIRST_ENTRY_KEY, "kc", firstEntry.getKey()); // // final Entry lastEntry = descendingMap.lastEntry(); // assertEquals("bad last entry value", "xx", lastEntry.getValue()); // assertEquals("bad last entry key", "ka", lastEntry.getKey()); // // final Set> entrySet = descendingMap.entrySet(); // final Iterator> iterator = entrySet.iterator(); // assertTrue("must have first entry", iterator.hasNext()); // assertEquals(BAD_FIRST_ENTRY_KEY, "kc", iterator.next().getKey()); // assertTrue("must have second entry", iterator.hasNext()); // assertEquals("bad second entry key", "kb", iterator.next().getKey()); // assertTrue("must have third entry", iterator.hasNext()); // assertEquals("bad third entry key", "ka", iterator.next().getKey()); // assertFalse("must not have fourth entry", iterator.hasNext()); // // descendingMap.remove("kb"); // assertEquals(BAD_SIZE, 2, descendingMap.size()); // assertFalse(MUST_NOT_BE_EMPTY, descendingMap.isEmpty()); // // assertEquals(BAD_SIZE, 2, navigableMap.size()); // assertFalse(MUST_NOT_BE_EMPTY, navigableMap.isEmpty()); // assertTrue("must contains key 'ka'", navigableMap.containsKey("ka")); // assertFalse("must not contains key 'kb'", navigableMap // .containsKey("kb")); // assertTrue("must contains key 'kc'", navigableMap.containsKey("kc")); // } public void testNavigableKeySet() { navigableMap.put("ka", "xx"); 
navigableMap.put("kb", "aa"); navigableMap.put("kc", "zz"); final NavigableSet navigableSet = navigableMap .navigableKeySet(); assertEquals("bad first element", "ka", navigableSet.first()); assertEquals("bad last element", "kc", navigableSet.last()); assertTrue(MUST_CONTAINS_KA, navigableSet.contains("ka")); assertTrue(MUST_CONTAINS_KB, navigableSet.contains("kb")); assertTrue(MUST_CONTAINS_KC, navigableSet.contains("kc")); navigableSet.remove("kb"); assertEquals(BAD_SIZE, 2, navigableMap.size()); assertFalse(MUST_NOT_BE_EMPTY, navigableMap.isEmpty()); assertTrue("must contains key 'ka'", navigableMap.containsKey("ka")); assertFalse("must not contains key 'kb'", navigableMap .containsKey("kb")); assertTrue("must contains key 'kc'", navigableMap.containsKey("kc")); } // TODO implement this // public void testDescendingKeySet() { // navigableMap.put("ka", "xx"); // navigableMap.put("kb", "aa"); // navigableMap.put("kc", "zz"); // final NavigableSet navigableSet = navigableMap // .descendingKeySet(); // assertEquals("bad first element", "kc", navigableSet.first()); // assertEquals("bad last element", "ka", navigableSet.last()); // assertTrue(MUST_CONTAINS_KA, navigableSet.contains("ka")); // assertTrue(MUST_CONTAINS_KB, navigableSet.contains("kb")); // assertTrue(MUST_CONTAINS_KC, navigableSet.contains("kc")); // // navigableSet.remove("kb"); // assertEquals(BAD_SIZE, 2, navigableMap.size()); // assertFalse(MUST_NOT_BE_EMPTY, navigableMap.isEmpty()); // assertTrue("must contains key 'ka'", navigableMap.containsKey("ka")); // assertFalse("must not contains key 'kb'", navigableMap // .containsKey("kb")); // assertTrue("must contains key 'kc'", navigableMap.containsKey("kc")); // } public void testSubMap() { navigableMap.put("ka", "xx"); navigableMap.put("kb", "aa"); navigableMap.put("kc", "zz"); navigableMap.put("kd", "uu"); SortedMap sortedMap = navigableMap.subMap("kb", "kd"); assertFalse(MUST_NOT_CONTAINS_KA, sortedMap.containsKey("ka")); assertTrue(MUST_CONTAINS_KB, sortedMap.containsKey("kb")); assertTrue(MUST_CONTAINS_KC, sortedMap.containsKey("kc")); assertFalse(MUST_NOT_CONTAINS_KD, sortedMap.containsKey("kd")); sortedMap = navigableMap.subMap("ka", false, "kc", true); assertFalse(MUST_NOT_CONTAINS_KA, sortedMap.containsKey("ka")); assertTrue(MUST_CONTAINS_KB, sortedMap.containsKey("kb")); assertTrue(MUST_CONTAINS_KC, sortedMap.containsKey("kc")); assertFalse(MUST_NOT_CONTAINS_KD, sortedMap.containsKey("kd")); } public void testHeadMap() { navigableMap.put("ka", "xx"); navigableMap.put("kb", "aa"); navigableMap.put("kc", "zz"); navigableMap.put("kd", "uu"); SortedMap sortedMap = navigableMap.headMap("kc"); assertTrue(MUST_CONTAINS_KA, sortedMap.containsKey("ka")); assertTrue(MUST_CONTAINS_KB, sortedMap.containsKey("kb")); assertFalse("must not contains 'kc'", sortedMap.containsKey("kc")); assertFalse(MUST_NOT_CONTAINS_KD, sortedMap.containsKey("kd")); sortedMap = navigableMap.headMap("kb", true); assertTrue(MUST_CONTAINS_KA, sortedMap.containsKey("ka")); assertTrue(MUST_CONTAINS_KB, sortedMap.containsKey("kb")); assertFalse("must not contains 'kc'", sortedMap.containsKey("kc")); assertFalse(MUST_NOT_CONTAINS_KD, sortedMap.containsKey("kd")); } public void testTailMap() { navigableMap.put("ka", "xx"); navigableMap.put("kb", "aa"); navigableMap.put("kc", "zz"); navigableMap.put("kd", "uu"); SortedMap sortedMap = navigableMap.tailMap("kc"); assertFalse(MUST_NOT_CONTAINS_KA, sortedMap.containsKey("ka")); assertFalse("must not contains 'kb'", sortedMap.containsKey("kb")); 
assertTrue(MUST_CONTAINS_KC, sortedMap.containsKey("kc")); assertTrue("must contains 'kd'", sortedMap.containsKey("kd")); sortedMap = navigableMap.tailMap("kb", false); assertFalse(MUST_NOT_CONTAINS_KA, sortedMap.containsKey("ka")); assertFalse("must not contains 'kb'", sortedMap.containsKey("kb")); assertTrue(MUST_CONTAINS_KC, sortedMap.containsKey("kc")); assertTrue("must contains 'kd'", sortedMap.containsKey("kd")); } } ================================================ FILE: src/test/java/org/apache/jdbm/BTreeMapTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ /* * Copyright (C) 2009 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.jdbm; import java.io.IOException; import java.util.*; import java.util.Map.Entry; import java.util.concurrent.ConcurrentNavigableMap; /** * This code comes from GoogleCollections, was modified for JDBM by Jan Kotek * * * * Tests representing the contract of {@link SortedMap}. Concrete subclasses of * this base class test conformance of concrete {@link SortedMap} subclasses to * that contract. 
* * @author Jared Levy * * */ public class BTreeMapTest extends ConcurrentMapInterfaceTest { public BTreeMapTest() { super(false, false, true, true, true, true); } DBAbstract r; public void setUp() throws Exception { r = TestCaseWithTestFile.newDBNoCache(); } @Override protected Integer getKeyNotInPopulatedMap() throws UnsupportedOperationException { return -100; } @Override protected String getValueNotInPopulatedMap() throws UnsupportedOperationException { return "XYZ"; } @Override protected String getSecondValueNotInPopulatedMap() throws UnsupportedOperationException { return "ASD"; } @Override protected ConcurrentNavigableMap makeEmptyMap() throws UnsupportedOperationException { try { BTree b = BTree.createInstance(r); return new BTreeMap(b, false); } catch (IOException e) { throw new RuntimeException(e); } } @Override protected ConcurrentNavigableMap makePopulatedMap() throws UnsupportedOperationException { ConcurrentNavigableMap map = makeEmptyMap(); for (int i = 0; i < 100; i++){ if(i%11==0||i%7==0) continue; map.put(i, "aa" + i); } return map; } @Override protected ConcurrentNavigableMap makeEitherMap() { try { return makePopulatedMap(); } catch (UnsupportedOperationException e) { return makeEmptyMap(); } } @SuppressWarnings("unchecked") // Needed for null comparator public void testOrdering() { final SortedMap map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } Iterator iterator = map.keySet().iterator(); Integer prior = iterator.next(); Comparator comparator = map.comparator(); while (iterator.hasNext()) { Integer current = iterator.next(); if (comparator == null) { Comparable comparable = (Comparable) prior; assertTrue(comparable.compareTo(current) < 0); } else { assertTrue(map.comparator().compare(prior, current) < 0); } current = prior; } } public void testFirstKeyEmpty() { final SortedMap map; try { map = makeEmptyMap(); } catch (UnsupportedOperationException e) { return; } assertNull(map.firstKey()); assertInvariants(map); } public void testFirstKeyNonEmpty() { final SortedMap map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } Integer expected = map.keySet().iterator().next(); assertEquals(expected, map.firstKey()); assertInvariants(map); } public void testLastKeyEmpty() { final SortedMap map; try { map = makeEmptyMap(); } catch (UnsupportedOperationException e) { return; } assertNull(map.lastKey()); assertInvariants(map); } public void testLastKeyNonEmpty() { final SortedMap map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } Integer expected = null; for (Integer key : map.keySet()) { expected = key; } assertEquals(expected, map.lastKey()); assertInvariants(map); } private static List toList(Collection collection) { return new ArrayList(collection); } private static List subListSnapshot( List list, int fromIndex, int toIndex) { List subList = new ArrayList(); for (int i = fromIndex; i < toIndex; i++) { subList.add(list.get(i)); } return Collections.unmodifiableList(subList); } public void testHeadMap() { final NavigableMap map; try { map = makeEitherMap(); } catch (UnsupportedOperationException e) { return; } List> list = toList(map.entrySet()); for (int i = 0; i < list.size(); i++) { List> expected = subListSnapshot(list, 0, i); SortedMap headMap = map.headMap(list.get(i).getKey()); assertEquals(expected, toList(headMap.entrySet())); } for (int i = 0; i < list.size(); i++) { List> expected = subListSnapshot(list, 0, i+1); SortedMap headMap = 
map.headMap(list.get(i).getKey(),true); assertEquals(expected, toList(headMap.entrySet())); } for (int i = 0; i < list.size(); i++) { List> expected = subListSnapshot(list, 0, i); SortedMap headMap = map.headMap(list.get(i).getKey(),false); assertEquals(expected, toList(headMap.entrySet())); } } public void testTailMap() { final NavigableMap map; try { map = makeEitherMap(); } catch (UnsupportedOperationException e) { return; } List> list = toList(map.entrySet()); for (int i = 0; i < list.size(); i++) { List> expected = subListSnapshot(list, i, list.size()); SortedMap tailMap = map.tailMap(list.get(i).getKey()); assertEquals(expected, toList(tailMap.entrySet())); } for (int i = 0; i < list.size(); i++) { List> expected = subListSnapshot(list, i, list.size()); SortedMap tailMap = map.tailMap(list.get(i).getKey(),true); assertEquals(expected, toList(tailMap.entrySet())); } for (int i = 0; i < list.size(); i++) { List> expected = subListSnapshot(list, i+1, list.size()); SortedMap tailMap = map.tailMap(list.get(i).getKey(),false); assertEquals(expected, toList(tailMap.entrySet())); } } public void testSubMap() { final NavigableMap map; try { map = makeEitherMap(); } catch (UnsupportedOperationException e) { return; } List> list = toList(map.entrySet()); for (int i = 0; i < list.size(); i++) { for (int j = i; j < list.size(); j++) { List> expected = subListSnapshot(list, i, j); SortedMap subMap = map.subMap(list.get(i).getKey(), list.get(j).getKey()); assertEquals(expected, toList(subMap.entrySet())); assertEquals(expected.size(), subMap.size()); assertEquals(expected.size(), subMap.keySet().size()); assertEquals(expected.size(), subMap.entrySet().size()); assertEquals(expected.size(), subMap.values().size()); } } for (int i = 0; i < list.size(); i++) { for (int j = i; j < list.size(); j++) { List> expected = subListSnapshot(list, i, j+1); SortedMap subMap = map.subMap(list.get(i).getKey(), true, list.get(j).getKey(), true); assertEquals(expected, toList(subMap.entrySet())); assertEquals(expected.size(), subMap.size()); assertEquals(expected.size(), subMap.keySet().size()); assertEquals(expected.size(), subMap.entrySet().size()); assertEquals(expected.size(), subMap.values().size()); } } for (int i = 0; i < list.size(); i++) { for (int j = i; j < list.size(); j++) { List> expected = subListSnapshot(list, i+1, j); SortedMap subMap = map.subMap(list.get(i).getKey(), false, list.get(j).getKey(), false); assertEquals(expected, toList(subMap.entrySet())); assertEquals(expected.size(), subMap.size()); assertEquals(expected.size(), subMap.keySet().size()); assertEquals(expected.size(), subMap.entrySet().size()); assertEquals(expected.size(), subMap.values().size()); } } } public void testSubMapIllegal() { final SortedMap map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } if (map.size() < 2) { return; } Iterator iterator = map.keySet().iterator(); Integer first = iterator.next(); Integer second = iterator.next(); try { map.subMap(second, first); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException expected) { } } } ================================================ FILE: src/test/java/org/apache/jdbm/BTreeNodeTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.IOException; /** * This class contains all Unit tests for {@link BTreeNode}. * * @author Alex Boisvert */ public class BTreeNodeTest extends TestCaseWithTestFile { /** * Basic tests */ public void testBasics() throws IOException { DBAbstract db; String test, test1, test2, test3; test = "test"; test1 = "test1"; test2 = "test2"; test3 = "test3"; db = newDBCache(); BTree tree = BTree.createInstance(db); BTreeNode node = new BTreeNode(tree, test, test); BTree.BTreeTupleBrowser browser; BTree.BTreeTuple tuple = new BTree.BTreeTuple(); // test insertion node.insert(1, test2, test2, false); node.insert(1, test3, test3, false); node.insert(1, test1, test1, false); // test binary search browser = node.find(1, test2,true); if (browser.getNext(tuple) == false) { throw new IllegalStateException("Browser didn't have 'test2'"); } if (!tuple.key.equals(test2)) { throw new IllegalStateException("Tuple key is not 'test2'"); } if (!tuple.value.equals(test2)) { throw new IllegalStateException("Tuple value is not 'test2'"); } db.close(); db = null; } } ================================================ FILE: src/test/java/org/apache/jdbm/BTreeSetTest.java ================================================ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.jdbm; import java.io.Serializable; import java.util.*; /** * Tests for TreeSet which comes with JDBM. 
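* A sketch (illustrative only, based on setUp() below) of how the set under test is created:
*
*   DB db = newDBNoCache();
*   SortedSet ts = db.createTreeSet("testBTreeSet");
*   // named tree set with a custom comparator, as in test_ConstructorLjava_util_Comparator()
*   SortedSet rev = db.createTreeSet("test", new ReversedIntegerComparator(), null);
*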
Original code comes from Apache Harmony, * Modified by Jan Kotek for use in JDBM */ public class BTreeSetTest extends TestCaseWithTestFile { private DB db; public static class ReversedIntegerComparator implements Comparator, Serializable { public int compare(Object o1, Object o2) { return -(((Integer) o1).compareTo((Integer) o2)); } public boolean equals(Object o1, Object o2) { return ((Integer) o1).compareTo((Integer) o2) == 0; } } SortedSet ts; Object objArray[] = new Object[1000]; /** * @tests java.util.TreeSet#TreeSet() */ public void test_Constructor() { // Test for method java.util.TreeSet() assertTrue("Did not construct correct TreeSet", db.createTreeSet("test").isEmpty()); } /** * @tests java.util.TreeSet#TreeSet(java.util.Comparator) */ public void test_ConstructorLjava_util_Comparator() { // Test for method java.util.TreeSet(java.util.Comparator) SortedSet myTreeSet = db.createTreeSet("test", new ReversedIntegerComparator(), null); assertTrue("Did not construct correct TreeSet", myTreeSet.isEmpty()); myTreeSet.add(new Integer(1)); myTreeSet.add(new Integer(2)); assertTrue( "Answered incorrect first element--did not use custom comparator ", myTreeSet.first().equals(new Integer(2))); assertTrue( "Answered incorrect last element--did not use custom comparator ", myTreeSet.last().equals(new Integer(1))); } /** * @tests java.util.TreeSet#TreeSet(java.util.SortedSet) */ public void test_ConstructorLjava_util_SortedSet() { // Test for method java.util.TreeSet(java.util.SortedSet) ReversedIntegerComparator comp = new ReversedIntegerComparator(); SortedSet myTreeSet = db.createTreeSet("test", comp, null); for (int i = 0; i < objArray.length; i++) myTreeSet.add(objArray[i]); SortedSet anotherTreeSet = db.getTreeSet("test"); anotherTreeSet.addAll(myTreeSet); assertTrue("TreeSet is not correct size", anotherTreeSet.size() == objArray.length); for (int counter = 0; counter < objArray.length; counter++) assertTrue("TreeSet does not contain correct elements", anotherTreeSet.contains(objArray[counter])); assertEquals("TreeSet does not answer correct comparator", anotherTreeSet .comparator().getClass(),comp.getClass()); assertEquals("TreeSet does not use comparator", anotherTreeSet.first() , objArray[objArray.length - 1]); } /** * @tests java.util.TreeSet#add(java.lang.Object) */ public void test_addLjava_lang_Object() { // Test for method boolean java.util.TreeSet.add(java.lang.Object) ts.add(new Integer(-8)); assertTrue("Failed to add Object", ts.contains(new Integer(-8))); ts.add(objArray[0]); assertTrue("Added existing element", ts.size() == objArray.length + 1); } /** * @tests java.util.TreeSet#addAll(java.util.Collection) */ public void test_addAllLjava_util_Collection() { // Test for method boolean // java.util.TreeSet.addAll(java.util.Collection) SortedSet s = db.createTreeSet("test"); s.addAll(ts); assertTrue("Incorrect size after add", s.size() == ts.size()); Iterator i = ts.iterator(); while (i.hasNext()) assertTrue("Returned incorrect set", s.contains(i.next())); } /** * @tests java.util.TreeSet#clear() */ public void test_clear() { // Test for method void java.util.TreeSet.clear() ts.clear(); assertEquals("Returned non-zero size after clear", 0, ts.size()); assertTrue("Found element in cleared set", !ts.contains(objArray[0])); } /** * @tests java.util.TreeSet#comparator() */ public void test_comparator() { // Test for method java.util.Comparator java.util.TreeSet.comparator() ReversedIntegerComparator comp = new ReversedIntegerComparator(); SortedSet myTreeSet = 
db.createTreeSet("test", comp, null); assertTrue("Answered incorrect comparator", myTreeSet.comparator() == comp); } /** * @tests java.util.TreeSet#contains(java.lang.Object) */ public void test_containsLjava_lang_Object() { // Test for method boolean java.util.TreeSet.contains(java.lang.Object) assertTrue("Returned false for valid Object", ts .contains(objArray[objArray.length / 2])); assertTrue("Returned true for invalid Object", !ts .contains(new Integer(-9))); } /** * @tests java.util.TreeSet#first() */ public void test_first() { // Test for method java.lang.Object java.util.TreeSet.first() assertEquals("Returned incorrect first element", ts.first(), objArray[0]); } /** * @tests java.util.TreeSet#headSet(java.lang.Object) */ public void test_headSetLjava_lang_Object() { // Test for method java.util.SortedSet // java.util.TreeSet.headSet(java.lang.Object) Set s = ts.headSet(new Integer(100)); assertEquals("Returned set of incorrect size", 100, s.size()); for (int i = 0; i < 100; i++) assertTrue("Returned incorrect set", s.contains(objArray[i])); } /** * @tests java.util.TreeSet#isEmpty() */ public void test_isEmpty() { // Test for method boolean java.util.TreeSet.isEmpty() assertTrue("Empty set returned false", db.createTreeSet("test").isEmpty()); assertTrue("Non-Empty returned true", !ts.isEmpty()); } /** * @tests java.util.TreeSet#iterator() */ public void test_iterator() { // Test for method java.util.Iterator java.util.TreeSet.iterator() SortedSet s = db.createTreeSet("test"); s.addAll(ts); Iterator i = ts.iterator(); Set as = new HashSet(Arrays.asList(objArray)); while (i.hasNext()) as.remove(i.next()); assertEquals("Returned incorrect iterator", 0, as.size()); } /** * @tests java.util.TreeSet#last() */ public void test_last() { // Test for method java.lang.Object java.util.TreeSet.last() assertEquals("Returned incorrect last element", ts.last(),objArray[objArray.length - 1]); } /** * @tests java.util.TreeSet#remove(java.lang.Object) */ public void test_removeLjava_lang_Object() { // Test for method boolean java.util.TreeSet.remove(java.lang.Object) ts.remove(objArray[0]); assertTrue("Failed to remove object", !ts.contains(objArray[0])); assertTrue("Failed to change size after remove", ts.size() == objArray.length - 1); } /** * @tests java.util.TreeSet#size() */ public void test_size() { // Test for method int java.util.TreeSet.size() assertTrue("Returned incorrect size", ts.size() == objArray.length); } /** * @tests java.util.TreeSet#subSet(java.lang.Object, java.lang.Object) */ public void test_subSetLjava_lang_ObjectLjava_lang_Object() { // Test for method java.util.SortedSet // java.util.TreeSet.subSet(java.lang.Object, java.lang.Object) final int startPos = objArray.length / 4; final int endPos = 3 * objArray.length / 4; SortedSet aSubSet = ts.subSet(objArray[startPos], objArray[endPos]); assertTrue("Subset has wrong number of elements", aSubSet.size() == (endPos - startPos)); for (int counter = startPos; counter < endPos; counter++) assertTrue("Subset does not contain all the elements it should", aSubSet.contains(objArray[counter])); int result; try { ts.subSet(objArray[3], objArray[0]); result = 0; } catch (IllegalArgumentException e) { result = 1; } assertEquals("end less than start should throw", 1, result); } /** * @tests java.util.TreeSet#tailSet(java.lang.Object) */ public void test_tailSetLjava_lang_Object() { // Test for method java.util.SortedSet // java.util.TreeSet.tailSet(java.lang.Object) Set s = ts.tailSet(new Integer(900)); assertEquals("Returned set of 
incorrect size", 100, s.size()); for (int i = 900; i < objArray.length; i++) assertTrue("Returned incorrect set", s.contains(objArray[i])); } /** * Tests equals() method. * Tests that no ClassCastException will be thrown in all cases. * Regression test for HARMONY-1639. */ public void test_equals() throws Exception { // comparing TreeSets with different object types Set s1 = db.createTreeSet("test1"); Set s2 = db.createTreeSet("test2"); s1.add("key1"); s1.add("key2"); s2.add(new Integer(1)); s2.add(new Integer(2)); assertFalse("Sets should not be equal 1", s1.equals(s2)); assertFalse("Sets should not be equal 2", s2.equals(s1)); // comparing TreeSet with HashSet s1 = db.createTreeSet("test"); s2 = new HashSet(); s1.add("key"); s2.add(new Object()); assertFalse("Sets should not be equal 3", s1.equals(s2)); assertFalse("Sets should not be equal 4", s2.equals(s1)); } /** * Sets up the fixture, for example, open a network connection. This method * is called before a test is executed. */ public void setUp() throws Exception { super.setUp(); db = newDBNoCache(); ts = db.createTreeSet("testBTreeSet"); for (int i = 0; i < objArray.length; i++) { Object x = objArray[i] = new Integer(i); ts.add(x); } } /** * Tears down the fixture, for example, close a network connection. This * method is called after a test is executed. */ public void tearDown() throws Exception { db.close(); super.tearDown(); } } ================================================ FILE: src/test/java/org/apache/jdbm/BTreeTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import junit.framework.AssertionFailedError; import junit.framework.TestResult; import java.io.File; import java.io.IOException; import java.io.Serializable; import java.util.AbstractMap.SimpleEntry; import java.util.*; import java.util.concurrent.locks.Lock; /** * This class contains all Unit tests for {@link BTree}. * * @author Alex Boisvert */ public class BTreeTest extends TestCaseWithTestFile { static final boolean DEBUG = false; // the number of threads to be started in the synchronization test static final int THREAD_NUMBER = 5; // the size of the content of the maps for the synchronization // test. Beware that THREAD_NUMBER * THREAD_CONTENT_COUNT < Integer.MAX_VALUE. static final int THREAD_CONTENT_SIZE = 150; // for how long should the threads run. static final int THREAD_RUNTIME = 10 * 1000; protected TestResult result_; /** * Overrides TestCase.run(TestResult), so the errors from threads * started from this thread can be added to the testresult. 
This is * shown in * http://www.javaworld.com/javaworld/jw-12-2000/jw-1221-junit.html * * @param result the testresult */ public void run(TestResult result) { result_ = result; super.run(result); result_ = null; } //---------------------------------------------------------------------- /** * Handles the exceptions from other threads, so they are not ignored * in the junit test result. This method must be called from every * thread's run() method, if any throwables were throws. * * @param t the throwable (either from an assertEquals, assertTrue, * fail, ... method, or an uncaught exception to be added to the test * result of the junit test. */ protected void handleThreadException(final Throwable t) { synchronized (result_) { if (t instanceof AssertionFailedError) result_.addFailure(this, (AssertionFailedError) t); else result_.addError(this, t); } } /** * Basic tests */ public void testBasics() throws IOException { DBAbstract db; BTree tree; byte[] test, test0, test1, test2, test3; byte[] value1, value2; test = "test".getBytes(); test0 = "test0".getBytes(); test1 = "test1".getBytes(); test2 = "test2".getBytes(); test3 = "test3".getBytes(); value1 = "value1".getBytes(); value2 = "value2".getBytes(); if (DEBUG) { System.out.println("BTreeTest.testBasics"); } db = newDBCache(); tree = BTree.createInstance(db, new ByteArrayComparator(),null,null,true); tree.insert(test1, value1, false); tree.insert(test2, value2, false); byte[] result; result = (byte[]) tree.get(test0); if (result != null) { throw new Error("Test0 shouldn't be found"); } result = (byte[]) tree.get(test1); if (result == null || ByteArrayComparator.compareByteArray(result, value1) != 0) { throw new Error("Invalid value for test1: " + result); } result = (byte[]) tree.get(test2); if (result == null || ByteArrayComparator.compareByteArray(result, value2) != 0) { throw new Error("Invalid value for test2: " + result); } result = (byte[]) tree.get(test3); if (result != null) { throw new Error("Test3 shouldn't be found"); } db.close(); } /** * Basic tests, just use the simple test possibilities of junit (cdaller) */ public void testBasics2() throws IOException { DBAbstract db; BTree tree; byte[] test, test0, test1, test2, test3; byte[] value1, value2; test = "test".getBytes(); test0 = "test0".getBytes(); test1 = "test1".getBytes(); test2 = "test2".getBytes(); test3 = "test3".getBytes(); value1 = "value1".getBytes(); value2 = "value2".getBytes(); if (DEBUG) System.out.println("BTreeTest.testBasics2"); db = newDBCache(); tree = BTree.createInstance(db, new ByteArrayComparator(),null,null,true); tree.insert(test1, value1, false); tree.insert(test2, value2, false); assertEquals(null, tree.get(test0)); assertEquals(0, ByteArrayComparator.compareByteArray(value1, (byte[]) tree.get(test1))); assertEquals(0, ByteArrayComparator.compareByteArray(value2, (byte[]) tree.get(test2))); assertEquals(null, (byte[]) tree.get(test3)); db.close(); } /** * Test what happens after the dbager has been closed but the * btree is accessed. WHAT SHOULD HAPPEN??????????? 
* (cdaller) */ public void testClose() throws IOException { DBAbstract db; BTree tree; byte[] test, test0, test1, test2, test3; byte[] value1, value2; test = "test".getBytes(); test0 = "test0".getBytes(); test1 = "test1".getBytes(); test2 = "test2".getBytes(); test3 = "test3".getBytes(); value1 = "value1".getBytes(); value2 = "value2".getBytes(); if (DEBUG) System.out.println("BTreeTest.testClose"); db = newDBCache(); tree = BTree.createInstance(db, new ByteArrayComparator(),null,null,true); tree.insert(test1, value1, false); tree.insert(test2, value2, false); assertEquals(null, tree.get(test0)); assertEquals(0, ByteArrayComparator.compareByteArray(value1, (byte[]) tree.get(test1))); assertEquals(0, ByteArrayComparator.compareByteArray(value2, (byte[]) tree.get(test2))); assertEquals(null, (byte[]) tree.get(test3)); db.close(); try { tree.browse(); fail("Should throw an IllegalStateException on access on not opened btree"); } catch (IllegalStateException except) { // ignore } try { tree.get(test0); fail("Should throw an IllegalStateException on access on not opened btree"); } catch (IllegalStateException except) { // ignore } try { tree.findGreaterOrEqual(test0); fail("Should throw an IllegalStateException on access on not opened btree"); } catch (IllegalStateException except) { // ignore } try { tree.insert(test2, value2, false); fail("Should throw an IllegalStateException on access on not opened btree"); } catch (IllegalStateException except) { // ignore } try { tree.remove(test0); fail("Should throw an IllegalStateException on access on not opened btree"); } catch (IllegalStateException except) { // ignore } /* try { tree.size(); fail( "Should throw an IllegalStateException on access on not opened btree" ); } catch( IllegalStateException except ) { // ignore } */ } /** * Test to insert different objects into one btree. (cdaller) */ public void testInsert() throws IOException { DBAbstract db; BTree tree; if (DEBUG) System.out.println("BTreeTest.testInsert"); db = newDBCache(); tree = BTree.createInstance(db); // insert differnt objects and retrieve them tree.insert("test1", "value1", false); tree.insert("test2", "value2", false); tree.insert("one", new Integer(1), false); tree.insert("two", new Long(2), false); tree.insert("myownobject", new ObjectTT(new Integer(234)), false); assertEquals("value2", (String) tree.get("test2")); assertEquals("value1", (String) tree.get("test1")); assertEquals(new Integer(1), (Integer) tree.get("one")); assertEquals(new Long(2), (Long) tree.get("two")); // what happens here? must not be replaced, does it return anything? // probably yes! assertEquals("value1", tree.insert("test1", "value11", false)); assertEquals("value1", tree.get("test1")); // still the old value? assertEquals("value1", tree.insert("test1", "value11", true)); assertEquals("value11", tree.get("test1")); // now the new value! ObjectTT expected_obj = new ObjectTT(new Integer(234)); ObjectTT btree_obj = (ObjectTT) tree.get("myownobject"); assertEquals(expected_obj, btree_obj); db.close(); } /** * Test to remove objects from the btree. 
(cdaller) */ public void testRemove() throws IOException { DBAbstract db; BTree tree; if (DEBUG) { System.out.println("BTreeTest.testRemove"); } db = newDBCache(); tree = BTree.createInstance(db); tree.insert("test1", "value1", false); tree.insert("test2", "value2", false); assertEquals("value1", (String) tree.get("test1")); assertEquals("value2", (String) tree.get("test2")); tree.remove("test1"); assertEquals(null, (String) tree.get("test1")); assertEquals("value2", (String) tree.get("test2")); tree.remove("test2"); assertEquals(null, (String) tree.get("test2")); int iterations = 1000; for (int count = 0; count < iterations; count++) { tree.insert("num" + count, new Integer(count), false); } assertEquals(iterations, tree._entries); for (int count = 0; count < iterations; count++) { assertEquals(new Integer(count), tree.get("num" + count)); } for (int count = 0; count < iterations; count++) { tree.remove("num" + count); } assertEquals(0, tree._entries); db.close(); } /** * Test to get differents objects in the btree. (cdaller) */ public void testFind() throws IOException { DBAbstract db; BTree tree; if (DEBUG) System.out.println("BTreeTest.testFind"); db = newDBCache(); tree = BTree.createInstance(db); tree.insert("test1", "value1", false); tree.insert("test2", "value2", false); Object value = tree.get("test1"); assertTrue(value instanceof String); assertEquals("value1", value); tree.insert("", "Empty String as key", false); assertEquals("Empty String as key", (String) tree.get("")); assertEquals(null, (String) tree.get("someoneelse")); db.close(); } /** * Test deletion of btree from record manager. (kday) *

* After deletion, the BTree and all of it's BTreeNode children (and their children) * should be removed from the recordmanager. */ public void testDelete() throws IOException { if (DEBUG) System.out.println("BTreeTest.testFind"); DBAbstract db = newDBCache(); BTree tree = BTree.createInstance(db); // put enough data into the tree so we definitely have multiple nodes for (int count = 1; count <= 1000; count++) { tree.insert("num" + count, new Integer(count), false); if (count % 100 == 0) db.commit(); } List out = new ArrayList(); tree.dumpChildNodeRecIDs(out); assertTrue(out.size() > 0); } /** * Test to insert, retrieve and remove a large amount of data. (cdaller) */ public void testLargeDataAmount() throws IOException { DBAbstract db; BTree tree; if (DEBUG) System.out.println("BTreeTest.testLargeDataAmount"); db = newDBCache(); // db = new jdbm.db.BaseRecordManager( "test" ); tree = BTree.createInstance(db); // tree.setSplitPoint( 4 ); int iterations = 10000; // insert data for (int count = 0; count < iterations; count++) { try { assertEquals(null, tree.insert("num" + count, new Integer(count), false)); } catch (IOException except) { except.printStackTrace(); throw except; } } // get data for (int count = 0; count < iterations; count++) { assertEquals(new Integer(count), tree.get("num" + count)); } // delete data for (int count = 0; count < iterations; count++) { assertEquals(new Integer(count), tree.remove("num" + count)); } assertEquals(0, tree._entries); db.close(); } public void testRecordListener() throws IOException { DBAbstract db = newDBCache(); BTree tree = BTree.createInstance(db); final List> dels = new ArrayList(); final List> ins = new ArrayList(); final List> updNew = new ArrayList(); final List> updOld = new ArrayList(); tree.addRecordListener(new RecordListener() { public void recordUpdated(Integer key, String oldValue, String newValue) throws IOException { updOld.add(new SimpleEntry(key, oldValue)); updNew.add(new SimpleEntry(key, newValue)); } public void recordRemoved(Integer key, String value) throws IOException { dels.add(new SimpleEntry(key, value)); } public void recordInserted(Integer key, String value) throws IOException { ins.add(new SimpleEntry(key, value)); } }); //test insert tree.insert(11, "aa11", true); tree.insert(12, "aa12", true); assertTrue(ins.contains(new SimpleEntry(11, "aa11"))); assertTrue(ins.contains(new SimpleEntry(12, "aa12"))); assertTrue(ins.size() == 2); ins.clear(); assertTrue(dels.isEmpty()); assertTrue(updNew.isEmpty()); assertTrue(updOld.isEmpty()); //test update tree.insert(12, "aa123", true); assertTrue(ins.isEmpty()); assertTrue(dels.isEmpty()); assertTrue(updOld.contains(new SimpleEntry(12, "aa12"))); assertTrue(updOld.size() == 1); updOld.clear(); assertTrue(updNew.contains(new SimpleEntry(12, "aa123"))); assertTrue(updNew.size() == 1); updNew.clear(); //test remove tree.remove(11); assertTrue(dels.contains(new SimpleEntry(11, "aa11"))); assertTrue(dels.size() == 1); dels.clear(); assertTrue(ins.isEmpty()); assertTrue(updOld.isEmpty()); assertTrue(updNew.isEmpty()); } /** * Tests the corner case of deleting all nodes from the tree. In this case, all BTreeNodes * associated with the tree should be removed from the db. *

* We are also going to test to make sure the db file doesn't grow (leak) if we repeat the * process a number of times. * * @throws Exception */ public void testDeleteAllNodes() throws Exception { // we are going to run this test without object cache enabled. If it is turned on, // we will have problems with using a different deserializer for BTreeNodes than the standard // serializer. String recordManagerBasename = newTestFile(); String recordManagerDBname = recordManagerBasename + ".d.0"; long previousdbSize = 0; for (int i = 0; i < 5; i++) { DBAbstract db = (DBAbstract) DBMaker.openFile(recordManagerBasename).disableCache().make(); BTree tree = BTree.createInstance(db); String[] keys = new String[1000]; for (int count = 0; count < 1000; count++) { keys[count] = "num" + count; } // put enough data into the tree so we definitely have multiple nodes for (int count = 0; count < 1000; count++) { tree.insert(keys[count], new Integer(count), false); if (count % 100 == 0) db.commit(); } db.commit(); long currentdbSize = new File(recordManagerDBname).length(); assertTrue("file size too small " + currentdbSize, currentdbSize > 0); // now remove it all for (int count = 0; count < 1000; count++) { tree.remove(keys[count]); if (count % 100 == 0) db.commit(); } db.commit(); BTreeNode root = tree.getRoot(); assertNull(root); db.close(); currentdbSize = new File(recordManagerDBname).length(); assertTrue("file size too small " + currentdbSize, currentdbSize > 0); if (previousdbSize != 0) { assertTrue(currentdbSize == previousdbSize); } } } /** * Test access from multiple threads. Assertions only work, when the * run() method is overridden and the exceptions of the threads are * added to the resultset of the TestCase. see run() and * handleException(). */ public void testMultithreadAccess() throws IOException { DBAbstract db; BTree tree; if (DEBUG) System.out.println("BTreeTest.testMultithreadAccess"); db = newDBCache(); tree = BTree.createInstance(db); TestThread[] thread_pool = new TestThread[THREAD_NUMBER]; String name; Map content; // create content for the tree, different content for different threads! for (int thread_count = 0; thread_count < THREAD_NUMBER; thread_count++) { name = "thread" + thread_count; content = new TreeMap(); for (int content_count = 0; content_count < THREAD_CONTENT_SIZE; content_count++) { // guarantee, that keys and values do not overleap, // otherwise one thread removes some keys/values of // other threads! content.put(name + "_" + content_count, new Integer(thread_count * THREAD_CONTENT_SIZE + content_count)); } thread_pool[thread_count] = new TestThread(name, tree, content); thread_pool[thread_count].start(); } try { Thread.sleep(THREAD_RUNTIME); } catch (InterruptedException ignore) { ignore.printStackTrace(); } // stop threads: for (int thread_count = 0; thread_count < THREAD_NUMBER; thread_count++) { if (DEBUG) System.out.println("Stop threads"); thread_pool[thread_count].setStop(); } // wait until the threads really stop: try { for (int thread_count = 0; thread_count < THREAD_NUMBER; thread_count++) { if (DEBUG) System.out.println("Join thread " + thread_count); thread_pool[thread_count].join(); if (DEBUG) System.out.println("Joined thread " + thread_count); } } catch (InterruptedException ignore) { ignore.printStackTrace(); } db.close(); } /** * Helper method to 'simulate' the methods of an entry set of the btree. 
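* Illustrative use only (this mirrors how TestThread.action() below checks entries):
*
*   btree.insert("key", "value", false);
*   assertTrue(containsKey("key", btree));     // backed by btree.get(key) != null
*   assertTrue(containsValue("value", btree)); // browses the tree under its read lock
*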
*/ protected static boolean containsKey(Object key, BTree btree) throws IOException { return (btree.get(key) != null); } /** * Helper method to 'simulate' the methods of an entry set of the btree. */ protected static boolean containsValue(Object value, BTree btree) throws IOException { // we must synchronize on the BTree while browsing Lock readLock = btree.getLock().readLock(); try { readLock.lock(); BTree.BTreeTupleBrowser browser = btree.browse(); BTree.BTreeTuple tuple = new BTree.BTreeTuple(); while (browser.getNext(tuple)) { if (tuple.value.equals(value)) return (true); } } finally { readLock.unlock(); } // System.out.println("Comparation of '"+value+"' with '"+ tuple.getValue()+"' FAILED"); return (false); } /** * Helper method to 'simulate' the methods of an entry set of the btree. */ protected static boolean contains(Map.Entry entry, BTree btree) throws IOException { Object tree_obj = btree.get(entry.getKey()); if (tree_obj == null) { // can't distuingish, if value is null or not found!!!!!! return (entry.getValue() == null); } return (tree_obj.equals(entry.getValue())); } /** * Inner class for testing puroposes only (multithreaded access) */ class TestThread extends Thread { Map _content; BTree _btree; volatile boolean _continue = true; int THREAD_SLEEP_TIME = 50; // in ms String _name; TestThread(String name, BTree btree, Map content) { _content = content; _btree = btree; _name = name; } public void setStop() { _continue = false; } private void action() throws IOException { Iterator iterator = _content.entrySet().iterator(); Map.Entry entry; if (DEBUG) { System.out.println("Thread " + _name + ": fill btree."); } while (iterator.hasNext()) { entry = (Map.Entry) iterator.next(); assertEquals(null, _btree.insert(entry.getKey(), entry.getValue(), false)); } // as other threads are filling the btree as well, the size // of the btree is unknown (but must be at least the size of // the content map) assertTrue(_content.size() <= _btree._entries); iterator = _content.entrySet().iterator(); if (DEBUG) { System.out.println("Thread " + _name + ": iterates btree."); } while (iterator.hasNext()) { entry = (Map.Entry) iterator.next(); assertEquals(entry.getValue(), _btree.get(entry.getKey())); assertTrue(contains(entry, _btree)); assertTrue(containsKey(entry.getKey(), _btree)); assertTrue(containsValue(entry.getValue(), _btree)); } iterator = _content.entrySet().iterator(); Object key; if (DEBUG) { System.out.println("Thread " + _name + ": removes his elements from the btree."); } while (iterator.hasNext()) { key = ((Map.Entry) iterator.next()).getKey(); _btree.remove(key); assertTrue(!containsKey(key, _btree)); } } public void run() { if (DEBUG) System.out.println("Thread " + _name + ": started."); try { while (_continue) { action(); try { Thread.sleep(THREAD_SLEEP_TIME); } catch (InterruptedException except) { except.printStackTrace(); } } } catch (Throwable t) { if (DEBUG) { System.err.println("Thread " + _name + " threw an exception:"); t.printStackTrace(); } handleThreadException(t); } if (DEBUG) System.out.println("Thread " + _name + ": stopped."); } } // end of class TestThread static class ObjectTT implements Serializable { Object _content; private ObjectTT() { // empty } public ObjectTT(Object content) { _content = content; } Object getContent() { return _content; } public boolean equals(Object obj) { if (!(obj instanceof ObjectTT)) { return false; } return _content.equals(((ObjectTT) obj).getContent()); } public String toString() { return ("ObjectTT {content='" + _content + "'}"); } 
} // ObjectTT public void testIssue2(){ //this causes stack overflow // https://github.com/jankotek/JDBM3/issues/2 DB build = DBMaker.openFile(newTestFile()).setMRUCacheSize(100).make(); Map treeMap = build.createTreeMap("treeMap"); for (int i = 0; i < 100000; i++) { treeMap.put(i + "asdddfffffffffffffffffffdgf" + i + "sddfdfsfddddddddddddddddd" + i, "dsfgfg.dfcdfsgfgfffffffffffffffffdddddddddd"); if (i % 10000 == 0) { build.commit(); } } build.commit(); build.close(); } } ================================================ FILE: src/test/java/org/apache/jdbm/ByteArrayComparator.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.Serializable; import java.util.Comparator; /** * Comparator for byte arrays. * * @author Alex Boisvert */ public final class ByteArrayComparator implements Comparator, Serializable { /** * Version id for serialization. */ final static long serialVersionUID = 1L; /** * Compare two objects. * * @param obj1 First object * @param obj2 Second object * @return a positive integer if obj1 > obj2, 0 if obj1 == obj2, * and a negative integer if obj1 < obj2 */ public int compare(byte[] obj1, byte[] obj2) { if (obj1 == null) { throw new IllegalArgumentException("Argument 'obj1' is null"); } if (obj2 == null) { throw new IllegalArgumentException("Argument 'obj2' is null"); } return compareByteArray(obj1, obj2); } /** * Compare two byte arrays. 
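* Bytes are compared as unsigned values, and a shorter array that is a prefix of a longer
* one sorts first. Illustrative examples (not part of the original source):
*
*   compareByteArray(new byte[]{1}, new byte[]{(byte) 0xFF}) < 0   // 0xFF has the MSB set, so it is "greater"
*   compareByteArray("test".getBytes(), "test0".getBytes()) < 0    // shorter prefix sorts first
*   compareByteArray("a".getBytes(), "a".getBytes()) == 0
*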
*/ public static int compareByteArray(byte[] thisKey, byte[] otherKey) { int len = Math.min(thisKey.length, otherKey.length); // compare the byte arrays for (int i = 0; i < len; i++) { if (thisKey[i] >= 0) { if (otherKey[i] >= 0) { // both positive if (thisKey[i] < otherKey[i]) { return -1; } else if (thisKey[i] > otherKey[i]) { return 1; } } else { // otherKey is negative => greater (because MSB is 1) return -1; } } else { if (otherKey[i] >= 0) { // thisKey is negative => greater (because MSB is 1) return 1; } else { // both negative if (thisKey[i] < otherKey[i]) { return -1; } else if (thisKey[i] > otherKey[i]) { return 1; } } } } if (thisKey.length == otherKey.length) { return 0; } if (thisKey.length < otherKey.length) { return -1; } return 1; } } ================================================ FILE: src/test/java/org/apache/jdbm/CompactTest.java ================================================ package org.apache.jdbm; import java.io.IOException; import java.util.Map; public class CompactTest extends TestCaseWithTestFile { final int MAX = 1000 * 1000; public void testHashCompaction() throws IOException{ String f = newTestFile(); DB db0 = DBMaker.openFile(f).disableTransactions().make(); Map db = db0.createHashMap("db"); System.out.println("Adding"); for( int i=0 ; i < MAX; i++) { db.put("key"+i, "value"+i); } db0.close(); db0 = DBMaker.openFile(f).disableTransactions().make(); db = db0.getHashMap("db"); System.out.println("Deleting"); for( int i=0 ; i < MAX; i++) { db.remove("key"+i); } db0.close(); db0 = DBMaker.openFile(f).disableTransactions().make(); db = db0.getHashMap("db"); System.out.println("Adding"); for( int i=0 ; i < MAX; i++) { db.put("key"+i, "value"+i); } System.out.println("Closing"); db0.close(); } public void testBTreeCompaction() throws IOException{ String f = newTestFile(); DB db0 = DBMaker.openFile(f).disableTransactions().make(); Map db = db0.createTreeMap("db"); System.out.println("Adding"); for( int i=0 ; i < MAX; i++) { db.put("key"+i, "value"+i); } db0.close(); db0 = DBMaker.openFile(f).disableTransactions().make(); db = db0.getTreeMap("db"); System.out.println("Deleting"); for( int i=0 ; i < MAX; i++) { db.remove("key"+i); } db0.close(); db0 = DBMaker.openFile(f).disableTransactions().make(); db = db0.getTreeMap("db"); System.out.println("Adding"); for( int i=0 ; i < MAX; i++) { db.put("key"+i, "value"+i); } System.out.println("Closing"); db0.close(); } } ================================================ FILE: src/test/java/org/apache/jdbm/ConcurrentBTreeReadTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package org.apache.jdbm; import java.io.IOException; import java.io.Serializable; import java.util.Collections; import java.util.Comparator; import java.util.Random; public class ConcurrentBTreeReadTest extends TestCaseWithTestFile { public static class Dummy implements Serializable { private static final long serialVersionUID = -5567451291089724793L; private long key; @SuppressWarnings("unused") private byte space[] = new byte[1024]; public Dummy() { } public Dummy(long key) { this.key = key; } @Override public int hashCode() { return (int) key; } @Override public boolean equals(Object obj) { if (!(obj instanceof Dummy)) return false; Dummy other = (Dummy) obj; if (key != other.key) return false; return true; } } private DBAbstract db; private BTree btree; private int entries = 20000; private int readers = 5; public void setUp() throws Exception { super.setUp(); db = newDBCache(); btree = BTree.createInstance(db, (Comparator) Collections.reverseOrder(),null,null,true); System.err.println(db.getClass()); } public void testConcurrent() throws Exception { Runnable read = new Runnable() { public void run() { read(); } }; Thread t[] = new Thread[readers]; int c = 0; for (int i = 0; i < entries; i++) { btree.insert((long) i, new Dummy(i), false); if (i % 1000 == 0) { System.err.println("count " + i); commit(); } } System.err.println("done!"); commit(); System.gc(); Thread.sleep(1000); for (int i = 0; i < readers; i++) { t[c++] = new Thread(read); } System.err.println("start readers"); long start = System.currentTimeMillis(); for (int i = 0; i < t.length; i++) { t[i].start(); } for (int i = 0; i < t.length; i++) { t[i].join(); } long end = System.currentTimeMillis(); System.err.println("done " + (end - start) + "ms"); } private Object fetch(Long id) throws IOException { try { return btree.get(id); } catch (IOException e) { System.out.println("ERR " + id); e.printStackTrace(); return null; } } private void commit() throws IOException { db.commit(); } private void read() { Random r = new Random(); for (int i = 0; i < entries; i++) { try { fetch((long) r.nextInt(entries)); } catch (IOException e) { throw new RuntimeException(e); } } System.err.println("done read"); } } ================================================ FILE: src/test/java/org/apache/jdbm/ConcurrentMapInterfaceTest.java ================================================ /* * Copyright (C) 2008 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.jdbm; import java.util.concurrent.ConcurrentMap; /** * Tests representing the contract of {@link ConcurrentMap}. Concrete * subclasses of this base class test conformance of concrete * {@link ConcurrentMap} subclasses to that contract. * *
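* Concrete subclasses describe the map under test through the constructor flags, for
* example (as BTreeMapTest earlier in this suite does):
*
*   // allowsNullKeys, allowsNullValues, supportsPut, supportsRemove,
*   // supportsClear, supportsIteratorRemove
*   super(false, false, true, true, true, true);
*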

The tests in this class for null keys and values only check maps for * which null keys and values are not allowed. There are currently no * {@link ConcurrentMap} implementations that support nulls. * * @author Jared Levy */ public abstract class ConcurrentMapInterfaceTest extends MapInterfaceTest { protected ConcurrentMapInterfaceTest(boolean allowsNullKeys, boolean allowsNullValues, boolean supportsPut, boolean supportsRemove, boolean supportsClear, boolean supportsIteratorRemove) { super(allowsNullKeys, allowsNullValues, supportsPut, supportsRemove, supportsClear,supportsIteratorRemove); } /** * Creates a new value that is not expected to be found in * {@link #makePopulatedMap()} and differs from the value returned by * {@link #getValueNotInPopulatedMap()}. * * @return a value * @throws UnsupportedOperationException if it's not possible to make a value * that will not be found in the map */ protected abstract V getSecondValueNotInPopulatedMap() throws UnsupportedOperationException; @Override protected abstract ConcurrentMap makeEmptyMap() throws UnsupportedOperationException; @Override protected abstract ConcurrentMap makePopulatedMap() throws UnsupportedOperationException; @Override protected ConcurrentMap makeEitherMap() { try { return makePopulatedMap(); } catch (UnsupportedOperationException e) { return makeEmptyMap(); } } public void testPutIfAbsentNewKey() { final ConcurrentMap map; final K keyToPut; final V valueToPut; try { map = makeEitherMap(); keyToPut = getKeyNotInPopulatedMap(); valueToPut = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } if (supportsPut) { int initialSize = map.size(); V oldValue = map.putIfAbsent(keyToPut, valueToPut); assertEquals(valueToPut, map.get(keyToPut)); assertTrue(map.containsKey(keyToPut)); assertTrue(map.containsValue(valueToPut)); assertEquals(initialSize + 1, map.size()); assertNull(oldValue); } else { try { map.putIfAbsent(keyToPut, valueToPut); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testPutIfAbsentExistingKey() { final ConcurrentMap map; final K keyToPut; final V valueToPut; try { map = makePopulatedMap(); valueToPut = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } keyToPut = map.keySet().iterator().next(); if (supportsPut) { V oldValue = map.get(keyToPut); int initialSize = map.size(); assertEquals(oldValue, map.putIfAbsent(keyToPut, valueToPut)); assertEquals(oldValue, map.get(keyToPut)); assertTrue(map.containsKey(keyToPut)); assertTrue(map.containsValue(oldValue)); assertFalse(map.containsValue(valueToPut)); assertEquals(initialSize, map.size()); } else { try { map.putIfAbsent(keyToPut, valueToPut); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testPutIfAbsentNullKey() { if (allowsNullKeys) { return; // Not yet implemented } final ConcurrentMap map; final V valueToPut; try { map = makeEitherMap(); valueToPut = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } int initialSize = map.size(); if (supportsPut) { try { map.putIfAbsent(null, valueToPut); fail("Expected NullPointerException"); } catch (NullPointerException e) { // Expected. } } else { try { map.putIfAbsent(null, valueToPut); fail("Expected UnsupportedOperationException or NullPointerException"); } catch (UnsupportedOperationException e) { // Expected. 
} catch (NullPointerException e) { // Expected. } } assertEquals(initialSize, map.size()); assertInvariants(map); } public void testPutIfAbsentNewKeyNullValue() { if (allowsNullValues) { return; // Not yet implemented } final ConcurrentMap map; final K keyToPut; try { map = makeEitherMap(); keyToPut = getKeyNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } int initialSize = map.size(); if (supportsPut) { try { map.putIfAbsent(keyToPut, null); fail("Expected NullPointerException"); } catch (NullPointerException e) { // Expected. } } else { try { map.putIfAbsent(keyToPut, null); fail("Expected UnsupportedOperationException or NullPointerException"); } catch (UnsupportedOperationException e) { // Expected. } catch (NullPointerException e) { // Expected. } } assertEquals(initialSize, map.size()); assertInvariants(map); } public void testRemoveKeyValueExisting() { final ConcurrentMap map; final K keyToRemove; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } keyToRemove = map.keySet().iterator().next(); V oldValue = map.get(keyToRemove); if (supportsRemove) { int initialSize = map.size(); assertTrue(map.remove(keyToRemove, oldValue)); assertFalse(map.containsKey(keyToRemove)); assertEquals(initialSize - 1, map.size()); } else { try { map.remove(keyToRemove, oldValue); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testRemoveKeyValueMissingKey() { final ConcurrentMap map; final K keyToRemove; final V valueToRemove; try { map = makePopulatedMap(); keyToRemove = getKeyNotInPopulatedMap(); valueToRemove = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } if (supportsRemove) { int initialSize = map.size(); assertFalse(map.remove(keyToRemove, valueToRemove)); assertEquals(initialSize, map.size()); } else { try { map.remove(keyToRemove, valueToRemove); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testRemoveKeyValueDifferentValue() { final ConcurrentMap map; final K keyToRemove; final V valueToRemove; try { map = makePopulatedMap(); valueToRemove = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } keyToRemove = map.keySet().iterator().next(); if (supportsRemove) { int initialSize = map.size(); V oldValue = map.get(keyToRemove); assertFalse(map.remove(keyToRemove, valueToRemove)); assertEquals(oldValue, map.get(keyToRemove)); assertTrue(map.containsKey(keyToRemove)); assertEquals(initialSize, map.size()); } else { try { map.remove(keyToRemove, valueToRemove); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testRemoveKeyValueNullKey() { if (allowsNullKeys) { return; // Not yet implemented } final ConcurrentMap map; final V valueToRemove; try { map = makeEitherMap(); valueToRemove = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } int initialSize = map.size(); if (supportsRemove) { try { assertFalse(map.remove(null, valueToRemove)); } catch (NullPointerException e) { // Optional. } } else { try { assertFalse(map.remove(null, valueToRemove)); } catch (UnsupportedOperationException e) { // Optional. } catch (NullPointerException e) { // Optional. 
} } assertEquals(initialSize, map.size()); assertInvariants(map); } public void testRemoveKeyValueExistingKeyNullValue() { if (allowsNullValues) { return; // Not yet implemented } final ConcurrentMap map; final K keyToRemove; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } keyToRemove = map.keySet().iterator().next(); int initialSize = map.size(); if (supportsRemove) { try { assertFalse(map.remove(keyToRemove, null)); } catch (NullPointerException e) { // Optional. } } else { try { assertFalse(map.remove(keyToRemove, null)); } catch (UnsupportedOperationException e) { // Optional. } catch (NullPointerException e) { // Optional. } } assertEquals(initialSize, map.size()); assertInvariants(map); } public void testRemoveKeyValueMissingKeyNullValue() { if (allowsNullValues) { return; // Not yet implemented } final ConcurrentMap map; final K keyToRemove; try { map = makeEitherMap(); keyToRemove = getKeyNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } int initialSize = map.size(); if (supportsRemove) { try { assertFalse(map.remove(keyToRemove, null)); } catch (NullPointerException e) { // Optional. } } else { try { assertFalse(map.remove(keyToRemove, null)); } catch (UnsupportedOperationException e) { // Optional. } catch (NullPointerException e) { // Optional. } } assertEquals(initialSize, map.size()); assertInvariants(map); } /* Replace2 tests call 2-parameter replace(key, value) */ public void testReplace2ExistingKey() { final ConcurrentMap map; final K keyToReplace; final V newValue; try { map = makePopulatedMap(); newValue = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } keyToReplace = map.keySet().iterator().next(); if (supportsPut) { V oldValue = map.get(keyToReplace); int initialSize = map.size(); assertEquals(oldValue, map.replace(keyToReplace, newValue)); assertEquals(newValue, map.get(keyToReplace)); assertTrue(map.containsKey(keyToReplace)); assertTrue(map.containsValue(newValue)); assertEquals(initialSize, map.size()); } else { try { map.replace(keyToReplace, newValue); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testReplace2MissingKey() { final ConcurrentMap map; final K keyToReplace; final V newValue; try { map = makeEitherMap(); keyToReplace = getKeyNotInPopulatedMap(); newValue = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } if (supportsPut) { int initialSize = map.size(); assertNull(map.replace(keyToReplace, newValue)); assertNull(map.get(keyToReplace)); assertFalse(map.containsKey(keyToReplace)); assertFalse(map.containsValue(newValue)); assertEquals(initialSize, map.size()); } else { try { map.replace(keyToReplace, newValue); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testReplace2NullKey() { if (allowsNullKeys) { return; // Not yet implemented } final ConcurrentMap map; final V valueToReplace; try { map = makeEitherMap(); valueToReplace = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } int initialSize = map.size(); if (supportsPut) { try { assertNull(map.replace(null, valueToReplace)); } catch (NullPointerException e) { // Optional. } } else { try { assertNull(map.replace(null, valueToReplace)); } catch (UnsupportedOperationException e) { // Optional. } catch (NullPointerException e) { // Optional. 
} } assertEquals(initialSize, map.size()); assertInvariants(map); } public void testReplace2ExistingKeyNullValue() { if (allowsNullValues) { return; // Not yet implemented } final ConcurrentMap map; final K keyToReplace; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } keyToReplace = map.keySet().iterator().next(); int initialSize = map.size(); if (supportsPut) { try { map.replace(keyToReplace, null); fail("Expected NullPointerException"); } catch (NullPointerException e) { // Expected. } } else { try { map.replace(keyToReplace, null); fail("Expected UnsupportedOperationException or NullPointerException"); } catch (UnsupportedOperationException e) { // Expected. } catch (NullPointerException e) { // Expected. } } assertEquals(initialSize, map.size()); assertInvariants(map); } public void testReplace2MissingKeyNullValue() { if (allowsNullValues) { return; // Not yet implemented } final ConcurrentMap map; final K keyToReplace; try { map = makeEitherMap(); keyToReplace = getKeyNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } int initialSize = map.size(); if (supportsPut) { try { assertNull(map.replace(keyToReplace, null)); } catch (NullPointerException e) { // Optional. } } else { try { assertNull(map.replace(keyToReplace, null)); } catch (UnsupportedOperationException e) { // Optional. } catch (NullPointerException e) { // Optional. } } assertEquals(initialSize, map.size()); assertInvariants(map); } /* * Replace3 tests call 3-parameter replace(key, oldValue, newValue) */ public void testReplace3ExistingKeyValue() { final ConcurrentMap map; final K keyToReplace; final V oldValue; final V newValue; try { map = makePopulatedMap(); newValue = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } keyToReplace = map.keySet().iterator().next(); oldValue = map.get(keyToReplace); if (supportsPut) { int initialSize = map.size(); assertTrue(map.replace(keyToReplace, oldValue, newValue)); assertEquals(newValue, map.get(keyToReplace)); assertTrue(map.containsKey(keyToReplace)); assertTrue(map.containsValue(newValue)); assertFalse(map.containsValue(oldValue)); assertEquals(initialSize, map.size()); } else { try { map.replace(keyToReplace, oldValue, newValue); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testReplace3ExistingKeyDifferentValue() { final ConcurrentMap map; final K keyToReplace; final V oldValue; final V newValue; try { map = makePopulatedMap(); oldValue = getValueNotInPopulatedMap(); newValue = getSecondValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } keyToReplace = map.keySet().iterator().next(); final V originalValue = map.get(keyToReplace); int initialSize = map.size(); if (supportsPut) { assertFalse(map.replace(keyToReplace, oldValue, newValue)); } else { try { map.replace(keyToReplace, oldValue, newValue); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. 
} } assertTrue(map.containsKey(keyToReplace)); assertFalse(map.containsValue(newValue)); assertFalse(map.containsValue(oldValue)); assertEquals(originalValue, map.get(keyToReplace)); assertEquals(initialSize, map.size()); assertInvariants(map); } public void testReplace3MissingKey() { final ConcurrentMap map; final K keyToReplace; final V oldValue; final V newValue; try { map = makeEitherMap(); keyToReplace = getKeyNotInPopulatedMap(); oldValue = getValueNotInPopulatedMap(); newValue = getSecondValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } int initialSize = map.size(); if (supportsPut) { assertFalse(map.replace(keyToReplace, oldValue, newValue)); } else { try { map.replace(keyToReplace, oldValue, newValue); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertFalse(map.containsKey(keyToReplace)); assertFalse(map.containsValue(newValue)); assertFalse(map.containsValue(oldValue)); assertEquals(initialSize, map.size()); assertInvariants(map); } public void testReplace3NullKey() { if (allowsNullKeys) { return; // Not yet implemented } final ConcurrentMap map; final V oldValue; final V newValue; try { map = makeEitherMap(); oldValue = getValueNotInPopulatedMap(); newValue = getSecondValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } int initialSize = map.size(); if (supportsPut) { try { assertFalse(map.replace(null, oldValue, newValue)); } catch (NullPointerException e) { // Optional. } } else { try { assertFalse(map.replace(null, oldValue, newValue)); } catch (UnsupportedOperationException e) { // Optional. } catch (NullPointerException e) { // Optional. } } assertEquals(initialSize, map.size()); assertInvariants(map); } public void testReplace3ExistingKeyNullOldValue() { if (allowsNullValues) { return; // Not yet implemented } final ConcurrentMap map; final K keyToReplace; final V newValue; try { map = makePopulatedMap(); newValue = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } keyToReplace = map.keySet().iterator().next(); final V originalValue = map.get(keyToReplace); int initialSize = map.size(); if (supportsPut) { try { assertFalse(map.replace(keyToReplace, null, newValue)); } catch (NullPointerException e) { // Optional. } } else { try { assertFalse(map.replace(keyToReplace, null, newValue)); } catch (UnsupportedOperationException e) { // Optional. } catch (NullPointerException e) { // Optional. } } assertEquals(initialSize, map.size()); assertEquals(originalValue, map.get(keyToReplace)); assertInvariants(map); } public void testReplace3MissingKeyNullOldValue() { if (allowsNullValues) { return; // Not yet implemented } final ConcurrentMap map; final K keyToReplace; final V newValue; try { map = makeEitherMap(); keyToReplace = getKeyNotInPopulatedMap(); newValue = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } int initialSize = map.size(); if (supportsPut) { try { assertFalse(map.replace(keyToReplace, null, newValue)); } catch (NullPointerException e) { // Optional. } } else { try { assertFalse(map.replace(keyToReplace, null, newValue)); } catch (UnsupportedOperationException e) { // Optional. } catch (NullPointerException e) { // Optional. 
} } assertEquals(initialSize, map.size()); assertInvariants(map); } public void testReplace3MissingKeyNullNewValue() { if (allowsNullValues) { return; // Not yet implemented } final ConcurrentMap map; final K keyToReplace; final V oldValue; try { map = makeEitherMap(); keyToReplace = getKeyNotInPopulatedMap(); oldValue = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } int initialSize = map.size(); if (supportsPut) { try { map.replace(keyToReplace, oldValue, null); } catch (NullPointerException e) { // Optional. } } else { try { map.replace(keyToReplace, oldValue, null); } catch (UnsupportedOperationException e) { // Optional. } catch (NullPointerException e) { // Optional. } } assertEquals(initialSize, map.size()); assertInvariants(map); } public void testReplace3ExistingKeyValueNullNewValue() { if (allowsNullValues) { return; // Not yet implemented } final ConcurrentMap map; final K keyToReplace; final V oldValue; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } keyToReplace = map.keySet().iterator().next(); oldValue = map.get(keyToReplace); int initialSize = map.size(); if (supportsPut) { try { map.replace(keyToReplace, oldValue, null); fail("Expected NullPointerException"); } catch (NullPointerException e) { // Expected. } } else { try { map.replace(keyToReplace, oldValue, null); fail("Expected UnsupportedOperationException or NullPointerException"); } catch (UnsupportedOperationException e) { // Expected. } catch (NullPointerException e) { // Expected. } } assertEquals(initialSize, map.size()); assertEquals(oldValue, map.get(keyToReplace)); assertInvariants(map); } } ================================================ FILE: src/test/java/org/apache/jdbm/DBCacheMRUTest.java ================================================ package org.apache.jdbm; import java.io.IOException; import java.util.ArrayList; public class DBCacheMRUTest extends TestCaseWithTestFile { public void testPurgeEntryClearsCache() throws IOException { DBCacheMRU d = (DBCacheMRU) newDBCache(); for(long i = 0;i<1e3;i++) d.addEntry(newEntry(i)); for(long i = 0;i<1e3;i++) d.purgeEntry(); assertEquals(d._hash.size(),0); } DBCacheMRU.CacheEntry newEntry(long i){ return new DBCacheMRU.CacheEntry(i,i); } public void testCacheMaxSize() throws IOException { DBCacheMRU d = (DBCacheMRU) DBMaker .openFile(newTestFile()) .setMRUCacheSize(100) .make(); ArrayList recids = new ArrayList(); for(int i = 0;i<1e5;i++){ recids.add(d.insert("aa"+i)); } d.commit(); for(int i = 0;i<1e5;i++){ d.fetch(recids.get(i)); } assert(d._hash.size()<=100); } } ================================================ FILE: src/test/java/org/apache/jdbm/DBCacheTest.java ================================================ package org.apache.jdbm; import java.util.Map; import java.util.Set; public class DBCacheTest extends TestCaseWithTestFile { // https://github.com/jankotek/JDBM3/issues/11 public void test_Issue_11_soft_cache_record_disappear(){ long MAX = (long) 1e6; String file = newTestFile(); DB d = DBMaker.openFile(file) .disableTransactions() .enableSoftCache() .make(); Set set = d.createHashSet("1"); for(Integer i = 0;i,Serializable { public String deserialize(DataInput in) throws IOException, ClassNotFoundException { i.incrementAndGet(); return in.readUTF(); } public void serialize(DataOutput out, String obj) throws IOException { i.incrementAndGet(); out.writeUTF(obj); } } public void testTreeMapValueSerializer() throws Exception { i.set(0); Serializer ser = new Serial(); DB db = 
newDBCache(); Map t = db.createTreeMap("test", null, null, ser); t.put(1l, "hopsa hejsa1"); t.put(2l, "hopsa hejsa2"); db.commit(); assertEquals(t.get(2l), "hopsa hejsa2"); assertTrue(i.intValue() > 0); } public void testCountRecid() throws Exception { DBStore db = newDBNoCache(); db.insert(""); //first insert an empty record, to make sure serializer is initialized long baseCount = db.countRecords(); for (int i = 1; i < 3000; i++) { Object val = "qjiodjqwoidjqwiodoi"; db.insert(val); if (i % 1000 == 0) db.commit(); assertEquals(db.countRecords(), i + baseCount); } } public void testGetCollections() throws IOException { DB db = newDBCache(); db.createTreeMap("treemap"); db.createHashMap("hashmap"); db.createTreeSet("treeset"); db.createHashSet("hashset"); db.createLinkedList("linkedlist"); Mapcols = db.getCollections(); assertTrue(cols.get("treemap") instanceof SortedMap); assertTrue(cols.get("hashmap") instanceof Map); assertTrue(cols.get("treeset") instanceof SortedSet); assertTrue(cols.get("hashset") instanceof Set); assertTrue(cols.get("linkedlist") instanceof List); } public void testRegisterShutdown(){ DB d = DBMaker.openFile(newTestFile()).closeOnExit().make(); //do nothing } public void testDeleteAfterExit(){ String f = newTestFile(); File f1 = new File(StorageDiskMapped.makeFileName(f,1,0)); File f2 = new File(StorageDiskMapped.makeFileName(f,-1,0)); assertFalse(f1.exists()); assertFalse(f2.exists()); DB d = DBMaker.openFile(f).deleteFilesAfterClose().make(); d.createHashSet("test"); assertTrue(f1.exists()); assertTrue(f2.exists()); d.close(); assertFalse(f1.exists()); assertFalse(f2.exists()); } public void testDeleteAfterExitRAF(){ String f = newTestFile(); File f1 = new File(StorageDiskMapped.makeFileName(f,1,0)); File f2 = new File(StorageDiskMapped.makeFileName(f,-1,0)); assertFalse(f1.exists()); assertFalse(f2.exists()); DB d = DBMaker.openFile(f).deleteFilesAfterClose().useRandomAccessFile().make(); d.createHashSet("test"); assertTrue(f1.exists()); assertTrue(f2.exists()); d.close(); assertFalse(f1.exists()); assertFalse(f2.exists()); } public void testDeleteLinkedList() throws IOException { DBStore d = newDBNoCache(); d.createHashMap("testXX").put("aa","bb"); //make sure serializer and name map are initilaized d.commit(); long recCount = d.countRecords(); List l = d.createLinkedList("test"); l.add("1"); l.add("2"); d.commit(); assertFalse(recCount == d.countRecords()); d.deleteCollection("test"); assertEquals(recCount,d.countRecords()); } public void testDeleteTreeMap() throws IOException { DBStore d = newDBNoCache(); d.createHashMap("testXX").put("aa","bb"); //make sure serializer and name map are initilaized d.commit(); long recCount = d.countRecords(); Map l = d.createTreeMap("test"); l.put("1", "b"); l.put("2", "b"); d.commit(); assertFalse(recCount == d.countRecords()); d.deleteCollection("test"); assertEquals(recCount,d.countRecords()); } public void testDeleteHashMap() throws IOException { DBStore d = newDBNoCache(); d.createHashMap("testXX").put("aa","bb"); //make sure serializer and name map are initilaized d.commit(); long recCount = d.countRecords(); Map l = d.createHashMap("test"); l.put("1", "b"); l.put("2", "b"); d.commit(); assertFalse(recCount == d.countRecords()); d.deleteCollection("test"); assertEquals(recCount,d.countRecords()); } public void testDeleteEmptyLinkedList() throws IOException { DBStore d = newDBNoCache(); d.createHashMap("testXX").put("aa","bb"); //make sure serializer and name map are initilaized d.commit(); long recCount = 
d.countRecords(); List l = d.createLinkedList("test"); d.commit(); assertFalse(recCount == d.countRecords()); d.deleteCollection("test"); assertEquals(recCount,d.countRecords()); } public void testDeleteEmptyTreeMap() throws IOException { DBStore d = newDBNoCache(); d.createHashMap("testXX").put("aa","bb"); //make sure serializer and name map are initilaized d.commit(); long recCount = d.countRecords(); Map l = d.createTreeMap("test"); d.commit(); assertFalse(recCount == d.countRecords()); d.deleteCollection("test"); assertEquals(recCount,d.countRecords()); } public void testDeleteEmptyHashMap() throws IOException { DBStore d = newDBNoCache(); d.createHashMap("testXX").put("aa","bb"); //make sure serializer and name map are initilaized d.commit(); long recCount = d.countRecords(); Map l = d.createHashMap("test"); d.commit(); assertFalse(recCount == d.countRecords()); d.deleteCollection("test"); assertEquals(recCount,d.countRecords()); } public void testHugeRecord() throws IOException { DBStore s = newDBNoCache(); try{ s.insert(new byte[50*1000*1000]); s.commit(); fail(); }catch(IllegalArgumentException e){ //expected } } public void testCompressRecid(){ for(long l = Magic.PAGE_HEADER_SIZE;l map = db.createHashMap("test"); map.putAll(toAdd); db.commit(); db.deleteCollection("test"); map = db.getHashMap("test"); assertNull(map); } } ================================================ FILE: src/test/java/org/apache/jdbm/DataInputOutputTest.java ================================================ package org.apache.jdbm; import junit.framework.TestCase; import java.io.IOException; public class DataInputOutputTest extends TestCase { final DataInputOutput d = new DataInputOutput(); public void testInt() throws IOException { int i = 123129049; d.writeInt(i); d.reset(); assertEquals(i, d.readInt()); } public void testLong() throws IOException { long i = 1231290495545446485L; d.writeLong(i); d.reset(); assertEquals(i, d.readLong()); } public void testBooelean() throws IOException { d.writeBoolean(true); d.reset(); assertEquals(true, d.readBoolean()); d.reset(); d.writeBoolean(false); d.reset(); assertEquals(false, d.readBoolean()); } public void testByte() throws IOException { for (int i = Byte.MIN_VALUE; i <= Byte.MAX_VALUE; i++) { d.writeByte(i); d.reset(); assertEquals(i, d.readByte()); d.reset(); } } public void testUnsignedByte() throws IOException { for (int i = 0; i <= 255; i++) { d.write(i); d.reset(); assertEquals(i, d.readUnsignedByte()); d.reset(); } } public void testLongPacker() throws IOException { for (int i = 0; i < 1e7; i++) { LongPacker.packInt(d, i); d.reset(); assertEquals(i, LongPacker.unpackInt(d)); d.reset(); } } } ================================================ FILE: src/test/java/org/apache/jdbm/DefragTest.java ================================================ package org.apache.jdbm; import java.io.IOException; import java.util.*; public class DefragTest extends TestCaseWithTestFile { public void testDefrag1() throws IOException { String file = newTestFile(); DBStore m = new DBStore(file, false, false,false); long loc = m.insert("123"); m.defrag(true); m.close(); m = new DBStore(file, false, false,false); assertEquals(m.fetch(loc), "123"); } public void testDefrag2() throws IOException { String file = newTestFile(); DBStore m = new DBStore(file, false, false,false); TreeMap map = new TreeMap(); for (int i = 0; i < 10000; i++) { long loc = m.insert("" + i); map.put(loc, "" + i); } m.defrag(true); m.close(); m = new DBStore(file, false, false,false); for (Long l : map.keySet()) { 
String val = map.get(l); assertEquals(val, m.fetch(l)); } } public void testDefragBtree() throws IOException { String file = newTestFile(); DBStore m = new DBStore(file, false, false,false); Map t = m.createTreeMap("aa"); TreeMap t2 = new TreeMap(); for (int i = 0; i < 10000; i++) { t.put(i, "" + i); t2.put(i, "" + i); } m.defrag(true); m.close(); m = new DBStore(file, false, false,false); t = m.getTreeMap("aa"); assertEquals(t, t2); } public void testDefragLinkedList() throws Exception { String file = newTestFile(); DBStore r = new DBStore(file, false, false,false); List l = r.createLinkedList("test"); Map junk = new LinkedHashMap(); for (int i = 0; i < 1e4; i++) { //insert some junk Double d = Math.random(); l.add(d); junk.put(r.insert(d), d); } r.commit(); //make copy of linked list List l2 = new ArrayList(l); long oldRecCount = r.countRecords(); r.defrag(true); r.close(); r = new DBStore(file, false, false,false); assertEquals(oldRecCount, r.countRecords()); //compare that list was unchanged assertEquals(l2, new ArrayList(r.getLinkedList("test"))); //and check that random junk still have the same recids for (Long recid : junk.keySet()) { assertEquals(junk.get(recid), r.fetch(recid)); } r.close(); } } ================================================ FILE: src/test/java/org/apache/jdbm/FileHeaderTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import junit.framework.TestCase; public class FileHeaderTest extends TestCase { /** * Test set, write, read */ public void testSetWriteRead() throws Exception { PageIo b = new PageIo(0, new byte[1000]); b.fileHeaderCheckHead(true); for (int i = 0; i < Magic.NLISTS; i++) { b.fileHeaderSetFirstOf(i, 100 * i); b.fileHeaderSetLastOf(i, 200 * i); } b.fileHeaderCheckHead(false); for (int i = 0; i < Magic.NLISTS; i++) { assertEquals("first " + i, i * 100, b.fileHeaderGetFirstOf(i)); assertEquals("last " + i, i * 200, b.fileHeaderGetLastOf(i)); } } /** * Test root rowids */ public void testRootRowids() throws Exception { PageIo b = new PageIo(0, new byte[Storage.PAGE_SIZE]); b.fileHeaderCheckHead(true); for (int i = 0; i < Magic.FILE_HEADER_NROOTS; i++) { b.fileHeaderSetRoot(i, 100 * i); } b.fileHeaderCheckHead(false); for (int i = 0; i < Magic.FILE_HEADER_NROOTS; i++) { assertEquals("root " + i, i * 100, b.fileHeaderGetRoot(i)); } } } ================================================ FILE: src/test/java/org/apache/jdbm/FileLockTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.IOError; import java.io.IOException; public class FileLockTest extends TestCaseWithTestFile { public void testLock() throws IOException { String file = newTestFile(); DB db1 = DBMaker.openFile(file).make(); //now open same file second time, exception should be thrown try { DB db2 = DBMaker.openFile(file).make(); fail("Exception should be thrown if file was locked"); } catch (IOError e) { //expected } db1.close(); //after close lock should be released, reopen DB db3 = DBMaker.openFile(file).make(); db3.close(); } } ================================================ FILE: src/test/java/org/apache/jdbm/HTreeBucketTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.*; import java.util.Map; /** * This class contains all Unit tests for {@link HTreeBucket}. 
* * @author Alex Boisvert */ public class HTreeBucketTest extends TestCaseWithTestFile { /** * Basic tests */ public void testBasics() throws IOException { DB db = newDBCache(); HTree tree = (HTree) db.createHashMap("test"); HTreeBucket bucket = new HTreeBucket(tree, (byte) 0); // add bucket.addElement("key", "value"); String s = (String) bucket.getValue("key"); assertEquals("value", s); // replace bucket.addElement("key", "value2"); s = (String) bucket.getValue("key"); assertEquals("value2", s); // add bucket.addElement("key2", "value3"); s = (String) bucket.getValue("key2"); assertEquals("value3", s); // remove bucket.removeElement("key2"); s = (String) bucket.getValue("key2"); assertEquals(null, s); bucket.removeElement("key"); s = (String) bucket.getValue("key"); assertEquals(null, s); db.close(); } public static class LongSerializer implements Serializer,Serializable { public LongSerializer() { } public void serialize(DataOutput out, Long obj) throws IOException { out.writeLong(obj); } public Long deserialize(DataInput in) throws IOException, ClassNotFoundException { return in.readLong(); } } public void testCustomSerializer() throws IOException { Serializer ser = new LongSerializer(); DB db = newDBCache(); Map s = db.createHashMap("test", ser, ser); s.put(new Long(1), new Long(2)); s.put(new Long(4), new Long(5)); db.commit(); db.clearCache(); assertTrue(s.size() == 2); assertEquals(s.get(new Long(1)), new Long(2)); assertEquals(s.get(new Long(4)), new Long(5)); } } ================================================ FILE: src/test/java/org/apache/jdbm/HTreeDirectoryTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.IOException; import java.util.Hashtable; import java.util.Iterator; /** * This class contains all Unit tests for {@link HTreeDirectory}. 
* * @author Alex Boisvert */ public class HTreeDirectoryTest extends TestCaseWithTestFile { /** * Basic tests */ public void testBasics() throws IOException { System.out.println("testBasics"); DBAbstract db = newDBCache(); HTree tree = (HTree) db.createHashMap("test"); HTreeDirectory dir = tree.getRoot(); dir.put("key", "value"); String s = (String) dir.get("key"); assertEquals("value", s); db.close(); } /** * Mixed tests */ public void testMixed() throws IOException { System.out.println("testMixed"); DBAbstract db = newDBCache(); HTree tree = (HTree) db.createHashMap("test"); HTreeDirectory dir = tree.getRoot(); Hashtable hash = new Hashtable(); // use to compare results int max = 30; // must be even // insert & check values for (int i = 0; i < max; i++) { dir.put("key" + i, "value" + i); hash.put("key" + i, "value" + i); } db.commit(); for (int i = 0; i < max; i++) { String s = (String) dir.get("key" + i); assertEquals("value" + i, s); } db.commit(); // replace only even values for (int i = 0; i < max; i += 2) { dir.put("key" + i, "value" + (i * 2 + 1)); hash.put("key" + i, "value" + (i * 2 + 1)); } db.commit(); for (int i = 0; i < max; i++) { if ((i % 2) == 1) { // odd String s = (String) dir.get("key" + i); assertEquals("value" + i, s); } else { // even String s = (String) dir.get("key" + i); assertEquals("value" + (i * 2 + 1), s); } } db.commit(); // remove odd numbers for (int i = 1; i < max; i += 2) { dir.remove("key" + i); hash.remove("key" + i); } db.commit(); for (int i = 0; i < max; i++) { if ((i % 2) == 1) { // odd String s = (String) dir.get("key" + i); assertEquals(null, s); } else { // even String s = (String) dir.get("key" + i); assertEquals("value" + (i * 2 + 1), s); } } db.commit(); db.close(); db = null; } void checkEnumerations(Hashtable hash, HTreeDirectory dir) throws IOException { // test keys Hashtable clone = (Hashtable) hash.clone(); int count = 0; Iterator iter = dir.keys(); while (iter.hasNext()) { String s = iter.next(); count++; clone.remove(s); } assertEquals(hash.size(), count); // test values clone = (Hashtable) hash.clone(); count = 0; iter = dir.values(); while (iter.hasNext()) { String s = iter.next(); count++; clone.remove(s); } assertEquals(hash.size(), count); } } ================================================ FILE: src/test/java/org/apache/jdbm/HTreeMapTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package org.apache.jdbm; import java.util.concurrent.ConcurrentMap; public class HTreeMapTest extends ConcurrentMapInterfaceTest { public HTreeMapTest() { super(false, false, true, true, true, true); } DBAbstract r; public void setUp() throws Exception { r = TestCaseWithTestFile.newDBNoCache(); } @Override protected Integer getKeyNotInPopulatedMap() throws UnsupportedOperationException { return -100; } @Override protected String getValueNotInPopulatedMap() throws UnsupportedOperationException { return "XYZ"; } @Override protected String getSecondValueNotInPopulatedMap() throws UnsupportedOperationException { return "AAAA"; } @Override protected ConcurrentMap makeEmptyMap() throws UnsupportedOperationException { HTree b = (HTree) r.createHashMap("" + Math.random()); return b; } @Override protected ConcurrentMap makePopulatedMap() throws UnsupportedOperationException { ConcurrentMap map = makeEmptyMap(); for (int i = 0; i < 100; i++) map.put(i, "aa" + i); return map; } } ================================================ FILE: src/test/java/org/apache/jdbm/HTreeSetTest.java ================================================ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.jdbm; import java.util.Iterator; import java.util.Set; /** * Tests for HashSet which comes with JDBM. 
Original code comes from Apache Harmony, * Modified by Jan Kotek for use in JDBM */ public class HTreeSetTest extends TestCaseWithTestFile { Set hs; DB db; static Object[] objArray; { objArray = new Object[1000]; for (int i = 0; i < objArray.length; i++) objArray[i] = new Integer(i); } /** * @tests java.util.HashSet#HashSet() */ public void test_Constructor() { // Test for method java.util.HashSet() Set hs2 = db.createHashSet("secondHashSet", null); assertEquals("Created incorrect HashSet", 0, hs2.size()); } /** * @tests java.util.HashSet#add(java.lang.Object) */ public void test_addLjava_lang_Object() { // Test for method boolean java.util.HashSet.add(java.lang.Object) int size = hs.size(); hs.add(new Integer(8)); assertTrue("Added element already contained by set", hs.size() == size); hs.add(new Integer(-9)); assertTrue("Failed to increment set size after add", hs.size() == size + 1); assertTrue("Failed to add element to set", hs.contains(new Integer(-9))); } /** * @tests java.util.HashSet#clear() */ public void test_clear() { // Test for method void java.util.HashSet.clear() Set orgSet = new java.util.HashSet(hs); hs.clear(); Iterator i = orgSet.iterator(); assertEquals("Returned non-zero size after clear", 0, hs.size()); while (i.hasNext()) assertTrue("Failed to clear set", !hs.contains(i.next())); } /** * @tests java.util.HashSet#contains(java.lang.Object) */ public void test_containsLjava_lang_Object() { // Test for method boolean java.util.HashSet.contains(java.lang.Object) assertTrue("Returned false for valid object", hs.contains(objArray[90])); assertTrue("Returned true for invalid Object", !hs .contains(new Object())); } /** * @tests java.util.HashSet#isEmpty() */ public void test_isEmpty() { // Test for method boolean java.util.HashSet.isEmpty() assertTrue("Empty set returned false", db.createHashSet("secondHashSet", null).isEmpty()); assertTrue("Non-empty set returned true", !hs.isEmpty()); } /** * @tests java.util.HashSet#iterator() */ public void test_iterator() { // Test for method java.util.Iterator java.util.HashSet.iterator() Iterator i = hs.iterator(); int x = 0; while (i.hasNext()) { assertTrue("Failed to iterate over all elements", hs.contains(i .next())); ++x; } assertTrue("Returned iteration of incorrect size", hs.size() == x); } /** * @tests java.util.HashSet#remove(java.lang.Object) */ public void test_removeLjava_lang_Object() { // Test for method boolean java.util.HashSet.remove(java.lang.Object) int size = hs.size(); hs.remove(new Integer(98)); assertTrue("Failed to remove element", !hs.contains(new Integer(98))); assertTrue("Failed to decrement set size", hs.size() == size - 1); } /** * @tests java.util.HashSet#size() */ public void test_size() { // Test for method int java.util.HashSet.size() assertTrue("Returned incorrect size", hs.size() == (objArray.length)); hs.clear(); assertEquals("Cleared set returned non-zero size", 0, hs.size()); } /** * Sets up the fixture, for example, open a network connection. This method * is called before a test is executed. */ public void setUp() throws Exception { super.setUp(); db = newDBNoCache(); hs = db.createHashSet("testHashSet", null); for (int i = 0; i < objArray.length; i++) hs.add(objArray[i]); } /** * Tears down the fixture, for example, close a network connection. This * method is called after a test is executed. 
*/ public void tearDown() throws Exception { db.close(); super.tearDown(); } public void testContains(){ } } ================================================ FILE: src/test/java/org/apache/jdbm/HTreeTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.AbstractMap.SimpleEntry; import java.util.Map; /** * This class contains all Unit tests for {@link HTree}. * * @author Alex Boisvert */ public class HTreeTest extends TestCaseWithTestFile { /** * Basic tests */ public void testIterator() throws IOException { DBAbstract db = newDBCache(); HTree testTree = (HTree) db.createHashMap("tree"); int total = 10; for (int i = 0; i < total; i++) { testTree.put(Long.valueOf("" + i), Long.valueOf("" + i)); } db.commit(); Iterator fi = testTree.values().iterator(); Object item; int count = 0; while (fi.hasNext()) { fi.next(); count++; } assertEquals(count, total); db.close(); } public void testRecordListener() throws IOException { DBAbstract db = newDBCache(); HTree tree = (HTree) db.createHashMap("test"); final List> dels = new ArrayList(); final List> ins = new ArrayList(); final List> updNew = new ArrayList(); final List> updOld = new ArrayList(); tree.addRecordListener(new RecordListener() { public void recordUpdated(Integer key, String oldValue, String newValue) throws IOException { updOld.add(new SimpleEntry(key, oldValue)); updNew.add(new SimpleEntry(key, newValue)); } public void recordRemoved(Integer key, String value) throws IOException { dels.add(new SimpleEntry(key, value)); } public void recordInserted(Integer key, String value) throws IOException { ins.add(new SimpleEntry(key, value)); } }); //test insert tree.put(11, "aa11"); tree.put(12, "aa12"); assertTrue(ins.contains(new SimpleEntry(11, "aa11"))); assertTrue(ins.contains(new SimpleEntry(12, "aa12"))); assertTrue(ins.size() == 2); ins.clear(); assertTrue(dels.isEmpty()); assertTrue(updNew.isEmpty()); assertTrue(updOld.isEmpty()); //test update tree.put(12, "aa123"); assertTrue(ins.isEmpty()); assertTrue(dels.isEmpty()); assertTrue(updOld.contains(new SimpleEntry(12, "aa12"))); assertTrue(updOld.size() == 1); updOld.clear(); assertTrue(updNew.contains(new SimpleEntry(12, "aa123"))); assertTrue(updNew.size() == 1); updNew.clear(); //test remove tree.remove(11); assertTrue(dels.contains(new SimpleEntry(11, "aa11"))); assertTrue(dels.size() == 1); dels.clear(); assertTrue(ins.isEmpty()); assertTrue(updOld.isEmpty()); assertTrue(updNew.isEmpty()); } public void testIssue(){ int size = 100000; int commitSize = 100000; DB build = DBMaker.openFile(newTestFile()).setMRUCacheSize(100).make(); Map hashMap = build.createHashMap("hashMap"); for (int i = 
0; i < size; i++) { hashMap.put(i + "asdddfdgf" + i + "sddfdfsf" + i, "dsfgfg.dfcdfsgfg"); if (i % commitSize == 0) { build.commit(); } } build.commit(); build.calculateStatistics(); build.close(); } } ================================================ FILE: src/test/java/org/apache/jdbm/LinkedList2Test.java ================================================ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.jdbm; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Iterator; import java.util.List; import java.util.ListIterator; /** * Tests for LinkedList2 which comes with JDBM. Original code comes from Apache Harmony, * Modified by Jan Kotek for use in JDBM */ public class LinkedList2Test extends TestCaseWithTestFile { DB db; LinkedList2 ll; LinkedList2 testList; private Object testObjOne; private Object testObjTwo; private Object testObjThree; private Object testObjFour; private Object testObjLast; static Object[] objArray; { objArray = new Object[100]; for (int i = 0; i < objArray.length; i++) objArray[i] = new Integer(i); } /** * @tests java.util.LinkedList2#add(int, java.lang.Object) */ public void test_addILjava_lang_Object() { // Test for method void java.util.LinkedList2.add(int, java.lang.Object) Object o = "Test"; ll.add(50, o); assertEquals("Failed to add Object>: " + ll.get(50).toString(), ll .get(50), o); assertEquals("Failed to fix up list after insert", ll.get(51), objArray[50]); assertEquals(ll.get(52), objArray[51]); ll.add(50, null); assertNull("Did not add null correctly", ll.get(50)); try { ll.add(-1, "Test"); fail("Should throw IndexOutOfBoundsException"); } catch (IndexOutOfBoundsException e) { // Excepted } try { ll.add(-1, null); fail("Should throw IndexOutOfBoundsException"); } catch (IndexOutOfBoundsException e) { // Excepted } try { ll.add(ll.size() + 1, "Test"); fail("Should throw IndexOutOfBoundsException"); } catch (IndexOutOfBoundsException e) { // Excepted } try { ll.add(ll.size() + 1, null); fail("Should throw IndexOutOfBoundsException"); } catch (IndexOutOfBoundsException e) { // Excepted } } /** * @tests java.util.LinkedList2#addAll(int, java.util.Collection) */ public void test_addAllILjava_util_Collection() { // Test for method boolean java.util.LinkedList2.addAll(int, // java.util.Collection) ll.addAll(50, new ArrayList(ll)); assertEquals("Returned incorrect size after adding to existing list", 200, ll .size()); for (int i = 0; i < 50; i++) assertEquals("Manipulated elements < index", ll.get(i), objArray[i]); for (int i = 0; i >= 50 && (i < 150); i++) assertTrue("Failed to ad elements properly", ll.get(i) == objArray[i - 50]); for (int i = 0; i >= 150 && (i < 200); i++) assertTrue("Failed to ad elements properly", ll.get(i) == objArray[i - 100]); List myList = 
db.createLinkedList("testXX"); myList.add(null); myList.add("Blah"); myList.add(null); myList.add("Booga"); myList.add(null); ll.addAll(50, myList); assertNull("a) List w/nulls not added correctly", ll.get(50)); assertEquals("b) List w/nulls not added correctly", "Blah", ll.get(51)); assertNull("c) List w/nulls not added correctly", ll.get(52)); assertEquals("d) List w/nulls not added correctly", "Booga", ll.get(53)); assertNull("e) List w/nulls not added correctly", ll.get(54)); try { ll.addAll(50, null); fail("Should throw NullPointerException"); } catch (NullPointerException e) { // Excepted } } /** * @tests java.util.LinkedList2#addAll(int, java.util.Collection) */ public void test_addAllILjava_util_Collection_2() { // Regression for HARMONY-467 LinkedList2 obj = (LinkedList2) db.createLinkedList("testXX"); try { obj.addAll(-1, (Collection) null); fail("IndexOutOfBoundsException expected"); } catch (IndexOutOfBoundsException e) { } } /** * @tests java.util.LinkedList2#addAll(java.util.Collection) */ public void test_addAllLjava_util_Collection() { // Test for method boolean // java.util.LinkedList2.addAll(java.util.Collection) List l = new ArrayList(); l.addAll(new ArrayList(ll)); for (int i = 0; i < ll.size(); i++) assertTrue("Failed to add elements properly", l.get(i).equals( ll.get(i))); ll.addAll(new ArrayList(ll)); assertEquals("Returned incorrect siZe after adding to existing list", 200, ll .size()); for (int i = 0; i < 100; i++) { assertTrue("Added to list in incorrect order", ll.get(i).equals( l.get(i))); assertTrue("Failed to add to existing list", ll.get(i + 100) .equals(l.get(i))); } List myList = db.createLinkedList("testXX"); myList.add(null); myList.add("Blah"); myList.add(null); myList.add("Booga"); myList.add(null); ll.addAll(myList); assertNull("a) List w/nulls not added correctly", ll.get(200)); assertEquals("b) List w/nulls not added correctly", "Blah", ll.get(201)); assertNull("c) List w/nulls not added correctly", ll.get(202)); assertEquals("d) List w/nulls not added correctly", "Booga", ll.get(203)); assertNull("e) List w/nulls not added correctly", ll.get(204)); try { ll.addAll(null); fail("Should throw NullPointerException"); } catch (NullPointerException e) { // Excepted } } /** * @tests java.util.LinkedList2#clear() */ public void test_clear() { // Test for method void java.util.LinkedList2.clear() ll.clear(); for (int i = 0; i < ll.size(); i++) assertNull("Failed to clear list", ll.get(i)); } /** * @tests java.util.LinkedList2#contains(java.lang.Object) */ public void test_containsLjava_lang_Object() { // Test for method boolean // java.util.LinkedList2.contains(java.lang.Object) assertTrue("Returned false for valid element", ll .contains(objArray[99])); assertTrue("Returned false for equal element", ll.contains(new Integer( 8))); assertTrue("Returned true for invalid element", !ll .contains(new Object())); assertTrue("Should not contain null", !ll.contains(null)); ll.add(25, null); assertTrue("Should contain null", ll.contains(null)); } /** * @tests java.util.LinkedList2#get(int) */ public void test_getI() { // Test for method java.lang.Object java.util.LinkedList2.get(int) assertEquals("Returned incorrect element", ll.get(22), objArray[22]); try { ll.get(8765); fail("Failed to throw expected exception for index > size"); } catch (IndexOutOfBoundsException e) { } } /** * @tests java.util.LinkedList2#indexOf(java.lang.Object) */ public void test_indexOfLjava_lang_Object() { // Test for method int java.util.LinkedList2.indexOf(java.lang.Object) 
assertEquals("Returned incorrect index", 87, ll.indexOf(objArray[87])); assertEquals("Returned index for invalid Object", -1, ll .indexOf(new Object())); ll.add(20, null); ll.add(24, null); assertTrue("Index of null should be 20, but got: " + ll.indexOf(null), ll.indexOf(null) == 20); } /** * @tests java.util.LinkedList2#lastIndexOf(java.lang.Object) */ public void test_lastIndexOfLjava_lang_Object() { // Test for method int // java.util.LinkedList2.lastIndexOf(java.lang.Object) ll.add(new Integer(99)); assertEquals("Returned incorrect index", 100, ll.lastIndexOf(objArray[99])); assertEquals("Returned index for invalid Object", -1, ll .lastIndexOf(new Object())); ll.add(20, null); ll.add(24, null); assertTrue("Last index of null should be 20, but got: " + ll.lastIndexOf(null), ll.lastIndexOf(null) == 24); } /** * @tests java.util.LinkedList2#listIterator(int) */ public void test_listIteratorI() { // Test for method java.util.ListIterator // java.util.LinkedList2.listIterator(int) ListIterator i = ll.listIterator(); Object elm; int n = 0; while (i.hasNext()) { if (n == 0 || n == objArray.length - 1) { if (n == 0) assertTrue("First element claimed to have a previous", !i .hasPrevious()); if (n == objArray.length) assertTrue("Last element claimed to have next", !i .hasNext()); } elm = i.next(); assertEquals("Iterator returned elements in wrong order", elm, objArray[n]); if (n > 0 && n < objArray.length - 1) { assertEquals("Next index returned incorrect value", i.nextIndex(), n + 1); assertEquals("previousIndex returned incorrect value : " + i.previousIndex() + ", n val: " + n, i .previousIndex(), n); } ++n; } List myList = db.createLinkedList("testXX"); myList.add(null); myList.add("Blah"); myList.add(null); myList.add("Booga"); myList.add(null); ListIterator li = myList.listIterator(); assertTrue("li.hasPrevious() should be false", !li.hasPrevious()); assertNull("li.next() should be null", li.next()); assertTrue("li.hasPrevious() should be true", li.hasPrevious()); assertNull("li.prev() should be null", li.previous()); assertNull("li.next() should be null", li.next()); assertEquals("li.next() should be Blah", "Blah", li.next()); assertNull("li.next() should be null", li.next()); assertEquals("li.next() should be Booga", "Booga", li.next()); assertTrue("li.hasNext() should be true", li.hasNext()); assertNull("li.next() should be null", li.next()); assertTrue("li.hasNext() should be false", !li.hasNext()); } /** * @tests java.util.LinkedList2#remove(int) */ public void test_removeI() { // Test for method java.lang.Object java.util.LinkedList2.remove(int) ll.remove(10); assertEquals("Failed to remove element", -1, ll.indexOf(objArray[10])); try { ll.remove(999); fail("Failed to throw expected exception when index out of range"); } catch (IndexOutOfBoundsException e) { // Correct } ll.add(20, null); ll.remove(20); assertNotNull("Should have removed null", ll.get(20)); } /** * @tests java.util.LinkedList2#remove(java.lang.Object) */ public void test_removeLjava_lang_Object() { // Test for method boolean java.util.LinkedList2.remove(java.lang.Object) assertTrue("Failed to remove valid Object", ll.remove(objArray[87])); assertTrue("Removed invalid object", !ll.remove(new Object())); assertEquals("Found Object after removal", -1, ll.indexOf(objArray[87])); ll.add(null); ll.remove(null); assertTrue("Should not contain null afrer removal", !ll.contains(null)); } /** * @tests java.util.LinkedList2#set(int, java.lang.Object) */ public void test_setILjava_lang_Object() { // Test for method 
java.lang.Object java.util.LinkedList2.set(int, // java.lang.Object) ll.set(65, "aa"); assertEquals("Failed to set object", ll.get(65), "aa"); } /** * @tests java.util.LinkedList2#size() */ public void test_size() { // Test for method int java.util.LinkedList2.size() assertEquals("Returned incorrect size", ll.size(), objArray.length); int counter = 0; Iterator iter = ll.iterator(); while (iter.hasNext()) { counter++; iter.next(); } assertEquals("Returned incorrect size", counter, objArray.length); ll.remove(0); assertEquals("Returned incorrect size", ll.size(), objArray.length - 1); } /** * @tests java.util.LinkedList2#toArray() */ public void test_toArray() { // Test for method java.lang.Object [] java.util.LinkedList2.toArray() ll.add(null); Object[] obj = ll.toArray(); assertEquals("Returned array of incorrect size", objArray.length + 1, obj.length); for (int i = 0; i < obj.length - 1; i++) assertEquals("Returned incorrect array: " + i, obj[i], objArray[i]); assertNull("Returned incorrect array--end isn't null", obj[obj.length - 1]); } /** * @tests java.util.LinkedList2#toArray(java.lang.Object[]) */ public void test_toArray$Ljava_lang_Object() { // Test for method java.lang.Object [] // java.util.LinkedList2.toArray(java.lang.Object []) Integer[] argArray = new Integer[100]; Object[] retArray; retArray = ll.toArray(argArray); assertTrue("Returned different array than passed", retArray == argArray); List retList = db.createLinkedList("testXX1"); retList.addAll(Arrays.asList(retArray)); Iterator li = ll.iterator(); Iterator ri = retList.iterator(); while (li.hasNext()) assertEquals( li.next() , ri.next()); argArray = new Integer[1000]; retArray = ll.toArray(argArray); assertNull("Failed to set first extra element to null", argArray[ll .size()]); for (int i = 0; i < ll.size(); i++) assertEquals("Returned incorrect array: " + i, retArray[i], objArray[i]); ll.add(50, null); argArray = new Integer[101]; retArray = ll.toArray(argArray); assertTrue("Returned different array than passed", retArray == argArray); retArray = ll.toArray(argArray); assertTrue("Returned different array than passed", retArray == argArray); retList = db.createLinkedList("testXX2"); retList.addAll(Arrays.asList(retArray)); li = ll.iterator(); ri = retList.iterator(); while (li.hasNext()) assertTrue("Lists are not equal", li.next() == ri.next()); } /** * @tests {@link java.util.LinkedList#remove()} */ public void test_remove() { for (int i = 0; i < objArray.length; i++) { assertEquals("should remove the head", objArray[i], ll.remove(0)); } assertEquals("should be empty", 0, ll.size()); try { ll.remove(0); fail("IndexOutOfBoundsException is expected when removing from the empty list"); } catch (IndexOutOfBoundsException e) { //-- expected } } /** * Sets up the fixture, for example, open a network connection. This method * is called before a test is executed. 
*/ public void setUp() throws Exception { super.setUp(); this.db = newDBCache(); ll = (LinkedList2) db.createLinkedList("ll"); for (int i = 0; i < objArray.length; i++) { ll.add(objArray[i]); } testList = (LinkedList2) db.createLinkedList("testList"); testObjOne = new Object(); testObjTwo = new Object(); testObjThree = new Object(); testObjFour = new Object(); testObjLast = new Object(); } } ================================================ FILE: src/test/java/org/apache/jdbm/LogicalRowIdManagerTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; /** * This class contains all Unit tests for {@link LogicalRowIdManager}. */ public class LogicalRowIdManagerTest extends TestCaseWithTestFile { /** * Test constructor */ public void testCtor() throws Exception { PageFile f = newRecordFile(); PageManager pm = new PageManager(f); PageFile free = newRecordFile(); PageManager pmfree = new PageManager(free); LogicalRowIdManager logMgr = new LogicalRowIdManager(f, pm); f.forceClose(); } /** * Test basics */ public void testBasics() throws Exception { PageFile f = newRecordFile(); PageManager pm = new PageManager(f); PageFile free = newRecordFile(); PageManager pmfree = new PageManager(free); LogicalRowIdManager logMgr = new LogicalRowIdManager(f, pm); long physid = 20<< Storage.PAGE_SIZE_SHIFT + 234; long logid = logMgr.insert(physid); assertEquals("check one", physid, logMgr.fetch(logid)); physid = 10 << Storage.PAGE_SIZE_SHIFT + 567; logMgr.update(logid, physid); assertEquals("check two", physid, logMgr.fetch(logid)); logMgr.delete(logid); f.forceClose(); } public void testFreeBasics() throws Exception { PageFile f = newRecordFile(); PageManager pm = new PageManager(f); LogicalRowIdManager freeMgr = new LogicalRowIdManager( f, pm); // allocate a rowid - should fail on an empty file long loc = freeMgr.getFreeSlot(); assertTrue("loc is not null?", loc == 0); pm.close(); f.close(); } } ================================================ FILE: src/test/java/org/apache/jdbm/LongHashMapTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package org.apache.jdbm; import java.util.Iterator; import java.util.Random; import java.util.TreeMap; import junit.framework.TestCase; public class LongHashMapTest extends TestCase { public void testAll() { LongHashMap t = new LongHashMap(); t.put(1, "aa"); t.put(2, "bb"); t.put(2, "bb"); t.put(4, "cc"); t.put(9, "FF"); assertEquals(4, t.size()); t.remove(1); assertEquals(3, t.size()); assertEquals(t.get(1), null); assertEquals(t.get(2), "bb"); assertEquals(t.get(3), null); assertEquals(t.get(4), "cc"); assertEquals(t.get(5), null); assertEquals(t.get(-1), null); assertEquals(t.get(9), "FF"); Iterator vals = t.valuesIterator(); assertTrue(vals.hasNext()); assertEquals(vals.next(), "bb"); assertTrue(vals.hasNext()); assertEquals(vals.next(), "cc"); assertTrue(vals.hasNext()); assertEquals(vals.next(), "FF"); assertFalse(vals.hasNext()); t.clear(); assertEquals(0, t.size()); t.put(2, "bb"); assertEquals(1, t.size()); assertEquals(t.get(1), null); assertEquals(t.get(2), "bb"); assertEquals(t.get(3), null); } public void testRandomCompare() { LongHashMap v1 = new LongHashMap(); TreeMap v2 = new TreeMap(); Random d = new Random(); for (int i = 0; i < 1000; i++) { long key = d.nextInt() % 100; double random = d.nextDouble(); if (random < 0.8) { // System.out.println("put "+key); v1.put(key, "" + key); v2.put(key, "" + key); } else { // System.out.println("remove "+key); v1.remove(key); v2.remove(key); } checkEquals(v1, v2); } } public void checkEquals(LongHashMap v1, TreeMap v2) { assertEquals(v1.size(), v2.size()); for (long k : v2.keySet()) { assertEquals(v1.get(k), v2.get(k)); } int counter = 0; Iterator it = v1.valuesIterator(); while (it.hasNext()) { String v = it.next(); long key = Long.valueOf(v); assertEquals(v1.get(key), v); assertEquals("" + key, v); counter++; } assertEquals(counter, v2.size()); } public void test2() { LongHashMap v1 = new LongHashMap(); v1.put(1611, "1611"); v1.put(15500, "15500"); v1.put(9446, "9446"); System.out.println(v1.get(9446)); System.out.println(v1.toString()); assertEquals(3, v1.size()); assertEquals(v1.get(9446), "9446"); } public void testMemoryConsuptio() { System.out.println("Memory available: " + (Runtime.getRuntime().maxMemory() / 1e6) + "MB"); System.out.println("Memory used: " + ((Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) / 1e6) + "MB"); long counter = 0; LongHashMap e = new LongHashMap(); //LongKeyChainedHashMap e = new LongKeyChainedHashMap(); //LongTreeMap e = new LongTreeMap(); while (counter < 1e6) { counter++; e.put(counter, ""); } System.out.println(counter + " items"); System.out.println("Memory used: " + ((Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) / 1e6) + "MB"); } } ================================================ FILE: src/test/java/org/apache/jdbm/LongTreeMap.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.jdbm; import java.util.ConcurrentModificationException; import java.util.Iterator; import java.util.Map; import java.util.NoSuchElementException; /** * Tree map (red-black tree) which uses primitive long as key. * The main advantage is that a new instance of Long does not have to be created for each lookup. *
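* <p>
* A minimal usage sketch (illustrative addition, not part of the original javadoc;
* it only relies on the methods defined further down in this class):
* <pre>
*   LongTreeMap map = new LongTreeMap();        // raw type kept short for the sketch
*   map.put(42L, "forty two");                  // key is a primitive long, no boxing
*   Object v = map.get(42L);                    // lookup without allocating a Long
*   LongTreeMap.LongIterator keys = map.keyIterator();
*   while (keys.hasNext()) {
*       long k = keys.nextLong();               // keys come back as primitives too
*   }
*   map.remove(42L);
* </pre>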

* This code comes from Android, which in turn comes from Apache Harmony. * This class was modified to use primitive longs and stripped down to consume less space. *

* Author of JDBM modifications: Jan Kotek *

* It is much slower then LongKeyChainedHashMap, but may be usefull in future for better licence. * * @param */ public class LongTreeMap { private Entry root; private int size; /** * counts modifications to throw ConcurrentAccessException */ private transient int modCount; /** * Returns the value of the mapping with the specified key. * * @param key the key. * @return the value of the mapping with the specified key. * @throws ClassCastException if the key cannot be compared with the keys in this map. * @throws NullPointerException if the key is {@code null} and the comparator cannot handle * {@code null}. * @since Android 1.0 */ public V get(long key) { Entry node = find(key); if (node != null) { return node.value; } return null; } /** * Maps the specified key to the specified value. * * @param key the key. * @param value the value. * @return the value of any previous mapping with the specified key or * {@code null} if there was no mapping. * @throws ClassCastException if the specified key cannot be compared with the keys in this * map. * @throws NullPointerException if the specified key is {@code null} and the comparator * cannot handle {@code null} keys. * @since Android 1.0 */ public V put(long key, V value) { Entry entry = rbInsert(key); V result = entry.value; entry.value = value; return result; } /** * Removes the mapping with the specified key from this map. * * @param key the key of the mapping to remove. * @return the value of the removed mapping or {@code null} if no mapping * for the specified key was found. * @throws ClassCastException if the specified key cannot be compared with the keys in this * map. * @throws NullPointerException if the specified key is {@code null} and the comparator * cannot handle {@code null} keys. * @since Android 1.0 */ public V remove(long key) { if (size == 0) { return null; } Entry node = find(key); if (node == null) { return null; } V result = node.value; rbDelete(node); return result; } /** * Removes all mappings from this TreeMap, leaving it empty. * * @see Map#isEmpty() * @see #size() * @since Android 1.0 */ public void clear() { root = null; size = 0; modCount++; } /** * Entry is an internal class which is used to hold the entries of a * TreeMap. 
*/ private static class Entry { Entry parent, left, right; long key; V value; boolean color; Entry(long key, V value) { this.key = key; this.value = value; } public String toString() { return super.toString() + " - " + key + " - " + value; } } /** * @return iterator over values in map */ public Iterator valuesIterator() { return new ValueIterator(); } /** * @return iterator over keys in map */ public LongIterator keyIterator() { return new LongIterator(); } private class MapIterator { int expectedModCount; Entry node; Entry lastNode; MapIterator() { expectedModCount = modCount; if (root != null) node = minimum(root); } public boolean hasNext() { return node != null; } final public void remove() { if (expectedModCount == modCount) { if (lastNode != null) { rbDelete(lastNode); lastNode = null; expectedModCount++; } else { throw new IllegalStateException(); } } else { throw new ConcurrentModificationException(); } } final void makeNext() { if (expectedModCount != modCount) { throw new ConcurrentModificationException(); } else if (node == null) { throw new NoSuchElementException(); } lastNode = node; node = successor(node); } } private class ValueIterator extends MapIterator implements Iterator { public V next() { makeNext(); return lastNode.value; } } public class LongIterator extends MapIterator implements Iterator { public Long next() { makeNext(); return lastNode.key; } public long nextLong() { makeNext(); return lastNode.key; } } public boolean isEmpty() { return size == 0; } public int size() { return size; } public String toString() { String s = this.getClass().getName(); s += "["; LongIterator iter = keyIterator(); boolean first = true; while (iter.hasNext()) { if (!first) { s += ", "; } first = false; long k = iter.nextLong(); s += k + "=" + get(k); } s += "]"; return s; } private Entry find(long object) { Entry x = root; while (x != null) { // result = object != null ? object.compareTo(x.key) : comparator // .compare(key, x.key); // if (result == 0) { // return x; // } // x = result < 0 ? x.left : x.right; if (object == x.key) return x; x = object < x.key ? x.left : x.right; } return null; } private Entry minimum(Entry x) { while (x.left != null) { x = x.left; } return x; } Entry successor(Entry x) { if (x.right != null) { return minimum(x.right); } Entry y = x.parent; while (y != null && x == y.right) { x = y; y = y.parent; } return y; } void rbDelete(Entry z) { Entry y = z.left == null || z.right == null ? z : successor(z); Entry x = y.left != null ? 
y.left : y.right; if (x != null) { x.parent = y.parent; } if (y.parent == null) { root = x; } else if (y == y.parent.left) { y.parent.left = x; } else { y.parent.right = x; } modCount++; if (y != z) { z.key = y.key; z.value = y.value; } if (!y.color && root != null) { if (x == null) { fixup(y.parent); } else { fixup(x); } } size--; } private void fixup(Entry x) { Entry w; while (x != root && !x.color) { if (x == x.parent.left) { w = x.parent.right; if (w == null) { x = x.parent; continue; } if (w.color) { w.color = false; x.parent.color = true; leftRotate(x.parent); w = x.parent.right; if (w == null) { x = x.parent; continue; } } if ((w.left == null || !w.left.color) && (w.right == null || !w.right.color)) { w.color = true; x = x.parent; } else { if (w.right == null || !w.right.color) { w.left.color = false; w.color = true; rightRotate(w); w = x.parent.right; } w.color = x.parent.color; x.parent.color = false; w.right.color = false; leftRotate(x.parent); x = root; } } else { w = x.parent.left; if (w == null) { x = x.parent; continue; } if (w.color) { w.color = false; x.parent.color = true; rightRotate(x.parent); w = x.parent.left; if (w == null) { x = x.parent; continue; } } if ((w.left == null || !w.left.color) && (w.right == null || !w.right.color)) { w.color = true; x = x.parent; } else { if (w.left == null || !w.left.color) { w.right.color = false; w.color = true; leftRotate(w); w = x.parent.left; } w.color = x.parent.color; x.parent.color = false; w.left.color = false; rightRotate(x.parent); x = root; } } } x.color = false; } private void leftRotate(Entry x) { Entry y = x.right; x.right = y.left; if (y.left != null) { y.left.parent = x; } y.parent = x.parent; if (x.parent == null) { root = y; } else { if (x == x.parent.left) { x.parent.left = y; } else { x.parent.right = y; } } y.left = x; x.parent = y; } private void rightRotate(Entry x) { Entry y = x.left; x.left = y.right; if (y.right != null) { y.right.parent = x; } y.parent = x.parent; if (x.parent == null) { root = y; } else { if (x == x.parent.right) { x.parent.right = y; } else { x.parent.left = y; } } y.right = x; x.parent = y; } private Entry rbInsert(long object) { boolean smaller = false; Entry y = null; if (size != 0) { Entry x = root; while (x != null) { y = x; // result = key != null ? key.compareTo(x.key) : comparator // .compare(object, x.key); // if (result == 0) { // return x; // } // x = result < 0 ? 
x.left : x.right; if (object == x.key) return x; if (object < x.key) { x = x.left; smaller = true; } else { x = x.right; smaller = false; } } } size++; modCount++; Entry z = new Entry(object, null); if (y == null) { return root = z; } z.parent = y; if (smaller) { y.left = z; } else { y.right = z; } balance(z); return z; } void balance(Entry x) { Entry y; x.color = true; while (x != root && x.parent.color) { if (x.parent == x.parent.parent.left) { y = x.parent.parent.right; if (y != null && y.color) { x.parent.color = false; y.color = false; x.parent.parent.color = true; x = x.parent.parent; } else { if (x == x.parent.right) { x = x.parent; leftRotate(x); } x.parent.color = false; x.parent.parent.color = true; rightRotate(x.parent.parent); } } else { y = x.parent.parent.left; if (y != null && y.color) { x.parent.color = false; y.color = false; x.parent.parent.color = true; x = x.parent.parent; } else { if (x == x.parent.left) { x = x.parent; rightRotate(x); } x.parent.color = false; x.parent.parent.color = true; leftRotate(x.parent.parent); } } } root.color = false; } } ================================================ FILE: src/test/java/org/apache/jdbm/MapInterfaceTest.java ================================================ /* * Copyright (C) 2008 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.jdbm; import static java.util.Collections.singleton; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.Set; import java.util.Map.Entry; import junit.framework.TestCase; /** * Tests representing the contract of {@link Map}. Concrete subclasses of this * base class test conformance of concrete {@link Map} subclasses to that * contract. *
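* <p>
* Illustrative sketch (not part of the original javadoc): a concrete subclass normally
* just passes the capability flags to the constructor and implements the four factory
* methods declared below; the HashMap-backed class name here is hypothetical:
* <pre>
*   public class HashMapInterfaceTest extends MapInterfaceTest<String, Integer> {
*       public HashMapInterfaceTest() {
*           // allowsNullKeys, allowsNullValues, supportsPut, supportsRemove,
*           // supportsClear, supportsIteratorRemove
*           super(true, true, true, true, true, true);
*       }
*       protected Map<String, Integer> makeEmptyMap() { return new HashMap<String, Integer>(); }
*       protected Map<String, Integer> makePopulatedMap() {
*           Map<String, Integer> m = makeEmptyMap();
*           m.put("one", 1);
*           return m;
*       }
*       protected String getKeyNotInPopulatedMap() { return "missing"; }
*       protected Integer getValueNotInPopulatedMap() { return -1; }
*   }
* </pre>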

* TODO: Descriptive assertion messages, with hints as to probable * fixes. * TODO: Add another constructor parameter indicating whether the * class under test is ordered, and check the order if so. * TODO: Refactor to share code with SetTestBuilder &c. * * @param the type of keys used by the maps under test * @param the type of mapped values used the maps under test * @author George van den Driessche */ public abstract class MapInterfaceTest extends TestCase { protected final boolean supportsPut; protected final boolean supportsRemove; protected final boolean supportsClear; protected final boolean allowsNullKeys; protected final boolean allowsNullValues; protected final boolean supportsIteratorRemove; /** * Creates a new, empty instance of the class under test. * * @return a new, empty map instance. * @throws UnsupportedOperationException if it's not possible to make an * empty instance of the class under test. */ protected abstract Map makeEmptyMap() throws UnsupportedOperationException; /** * Creates a new, non-empty instance of the class under test. * * @return a new, non-empty map instance. * @throws UnsupportedOperationException if it's not possible to make a * non-empty instance of the class under test. */ protected abstract Map makePopulatedMap() throws UnsupportedOperationException; /** * Creates a new key that is not expected to be found * in {@link #makePopulatedMap()}. * * @return a key. * @throws UnsupportedOperationException if it's not possible to make a key * that will not be found in the map. */ protected abstract K getKeyNotInPopulatedMap() throws UnsupportedOperationException; /** * Creates a new value that is not expected to be found * in {@link #makePopulatedMap()}. * * @return a value. * @throws UnsupportedOperationException if it's not possible to make a value * that will not be found in the map. */ protected abstract V getValueNotInPopulatedMap() throws UnsupportedOperationException; /** * Constructor with an explicit {@code supportsIteratorRemove} parameter. */ protected MapInterfaceTest( boolean allowsNullKeys, boolean allowsNullValues, boolean supportsPut, boolean supportsRemove, boolean supportsClear, boolean supportsIteratorRemove) { this.supportsPut = supportsPut; this.supportsRemove = supportsRemove; this.supportsClear = supportsClear; this.allowsNullKeys = allowsNullKeys; this.allowsNullValues = allowsNullValues; this.supportsIteratorRemove = supportsIteratorRemove; } /** * Used by tests that require a map, but don't care whether it's * populated or not. * * @return a new map instance. */ protected Map makeEitherMap() { try { return makePopulatedMap(); } catch (UnsupportedOperationException e) { return makeEmptyMap(); } } protected final boolean supportsValuesHashCode(Map map) { // get the first non-null value Collection values = map.values(); for (V value : values) { if (value != null) { try { value.hashCode(); } catch (Exception e) { return false; } return true; } } return true; } /** * Checks all the properties that should always hold of a map. Also calls * {@link #assertMoreInvariants} to check invariants that are peculiar to * specific implementations. * * @param map the map to check. 
* @see #assertMoreInvariants */ protected final void assertInvariants(Map map) { Set keySet = map.keySet(); Collection valueCollection = map.values(); Set> entrySet = map.entrySet(); assertEquals(map.size() == 0, map.isEmpty()); assertEquals(map.size(), keySet.size()); assertEquals(keySet.size() == 0, keySet.isEmpty()); assertEquals(!keySet.isEmpty(), keySet.iterator().hasNext()); int expectedKeySetHash = 0; for (K key : keySet) { V value = map.get(key); expectedKeySetHash += key != null ? key.hashCode() : 0; assertTrue(map.containsKey(key)); assertTrue(map.containsValue(value)); assertTrue(valueCollection.contains(value)); assertTrue(valueCollection.containsAll(Collections.singleton(value))); assertTrue(entrySet.contains(mapEntry(key, value))); assertTrue(allowsNullKeys || (key != null)); } assertEquals(expectedKeySetHash, keySet.hashCode()); assertEquals(map.size(), valueCollection.size()); assertEquals(valueCollection.size() == 0, valueCollection.isEmpty()); assertEquals( !valueCollection.isEmpty(), valueCollection.iterator().hasNext()); for (V value : valueCollection) { assertTrue(map.containsValue(value)); assertTrue(allowsNullValues || (value != null)); } assertEquals(map.size(), entrySet.size()); assertEquals(entrySet.size() == 0, entrySet.isEmpty()); assertEquals(!entrySet.isEmpty(), entrySet.iterator().hasNext()); assertTrue(!entrySet.contains("foo")); boolean supportsValuesHashCode = supportsValuesHashCode(map); if (supportsValuesHashCode) { int expectedEntrySetHash = 0; for (Entry entry : entrySet) { assertTrue(map.containsKey(entry.getKey())); assertTrue(map.containsValue(entry.getValue())); int expectedHash = (entry.getKey() == null ? 0 : entry.getKey().hashCode()) ^ (entry.getValue() == null ? 0 : entry.getValue().hashCode()); assertEquals(expectedHash, entry.hashCode()); expectedEntrySetHash += expectedHash; } assertEquals(expectedEntrySetHash, entrySet.hashCode()); assertTrue(entrySet.containsAll(new HashSet>(entrySet))); assertTrue(entrySet.equals(new HashSet>(entrySet))); } Object[] entrySetToArray1 = entrySet.toArray(); assertEquals(map.size(), entrySetToArray1.length); assertTrue(Arrays.asList(entrySetToArray1).containsAll(entrySet)); Entry[] entrySetToArray2 = new Entry[map.size() + 2]; entrySetToArray2[map.size()] = mapEntry("foo", 1); assertSame(entrySetToArray2, entrySet.toArray(entrySetToArray2)); assertNull(entrySetToArray2[map.size()]); assertTrue(Arrays.asList(entrySetToArray2).containsAll(entrySet)); Object[] valuesToArray1 = valueCollection.toArray(); assertEquals(map.size(), valuesToArray1.length); assertTrue(Arrays.asList(valuesToArray1).containsAll(valueCollection)); Object[] valuesToArray2 = new Object[map.size() + 2]; valuesToArray2[map.size()] = "foo"; assertSame(valuesToArray2, valueCollection.toArray(valuesToArray2)); assertNull(valuesToArray2[map.size()]); assertTrue(Arrays.asList(valuesToArray2).containsAll(valueCollection)); if (supportsValuesHashCode) { int expectedHash = 0; for (Entry entry : entrySet) { expectedHash += entry.hashCode(); } assertEquals(expectedHash, map.hashCode()); } assertMoreInvariants(map); } /** * Override this to check invariants which should hold true for a particular * implementation, but which are not generally applicable to every instance * of Map. * * @param map the map whose additional invariants to check. 
*/ protected void assertMoreInvariants(Map map) { } public void testClear() { final Map map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } if (supportsClear) { map.clear(); assertTrue(map.isEmpty()); } else { try { map.clear(); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testContainsKey() { final Map map; final K unmappedKey; try { map = makePopulatedMap(); unmappedKey = getKeyNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } assertTrue(!map.containsKey(unmappedKey)); assertTrue(map.containsKey(map.keySet().iterator().next())); if (allowsNullKeys) { map.containsKey(null); } else { try { map.containsKey(null); } catch (NullPointerException optional) { } } assertInvariants(map); } public void testContainsValue() { final Map map; final V unmappedValue; try { map = makePopulatedMap(); unmappedValue = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } assertTrue(!map.containsValue(unmappedValue)); assertTrue(map.containsValue(map.values().iterator().next())); if (allowsNullValues) { map.containsValue(null); } else { try { map.containsKey(null); } catch (NullPointerException optional) { } } assertInvariants(map); } public void testEntrySet() { final Map map; final Set> entrySet; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } assertInvariants(map); entrySet = map.entrySet(); final K unmappedKey; final V unmappedValue; try { unmappedKey = getKeyNotInPopulatedMap(); unmappedValue = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } for (Entry entry : entrySet) { assertTrue(!unmappedKey.equals(entry.getKey())); assertTrue(!unmappedValue.equals(entry.getValue())); } } public void testEntrySetForEmptyMap() { final Map map; try { map = makeEmptyMap(); } catch (UnsupportedOperationException e) { return; } assertInvariants(map); } public void testEntrySetContainsEntryNullKeyPresent() { if (!allowsNullKeys || !supportsPut) { return; } final Map map; final Set> entrySet; try { map = makeEitherMap(); } catch (UnsupportedOperationException e) { return; } assertInvariants(map); entrySet = map.entrySet(); final V unmappedValue; try { unmappedValue = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } map.put(null, unmappedValue); Entry entry = mapEntry(null, unmappedValue); assertTrue(entrySet.contains(entry)); assertTrue(!entrySet.contains(mapEntry(null, null))); } public void testEntrySetContainsEntryNullKeyMissing() { final Map map; final Set> entrySet; try { map = makeEitherMap(); } catch (UnsupportedOperationException e) { return; } assertInvariants(map); entrySet = map.entrySet(); final V unmappedValue; try { unmappedValue = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } Entry entry = mapEntry(null, unmappedValue); assertTrue(!entrySet.contains(entry)); assertTrue(!entrySet.contains(mapEntry(null, null))); } public void testEntrySetIteratorRemove() { final Map map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } Set> entrySet = map.entrySet(); Iterator> iterator = entrySet.iterator(); if (supportsIteratorRemove) { int initialSize = map.size(); Entry entry = iterator.next(); iterator.remove(); assertEquals(initialSize - 1, map.size()); assertTrue(!entrySet.contains(entry)); assertInvariants(map); try { iterator.remove(); fail("Expected 
IllegalStateException."); } catch (IllegalStateException e) { // Expected. } } else { try { iterator.next(); iterator.remove(); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testEntrySetRemove() { final Map map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } Set> entrySet = map.entrySet(); if (supportsRemove) { int initialSize = map.size(); boolean didRemove = entrySet.remove(entrySet.iterator().next()); assertTrue(didRemove); assertEquals(initialSize - 1, map.size()); } else { try { entrySet.remove(entrySet.iterator().next()); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testEntrySetRemoveMissingKey() { final Map map; final K key; try { map = makeEitherMap(); key = getKeyNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } Set> entrySet = map.entrySet(); Entry entry = mapEntry(key, getValueNotInPopulatedMap()); int initialSize = map.size(); if (supportsRemove) { boolean didRemove = entrySet.remove(entry); assertTrue(!didRemove); } else { try { boolean didRemove = entrySet.remove(entry); assertTrue(!didRemove); } catch (UnsupportedOperationException optional) { } } assertEquals(initialSize, map.size()); assertTrue(!map.containsKey(key)); assertInvariants(map); } public void testEntrySetRemoveDifferentValue() { final Map map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } Set> entrySet = map.entrySet(); K key = map.keySet().iterator().next(); Entry entry = mapEntry(key, getValueNotInPopulatedMap()); int initialSize = map.size(); if (supportsRemove) { boolean didRemove = entrySet.remove(entry); assertTrue(!didRemove); } else { try { boolean didRemove = entrySet.remove(entry); assertTrue(!didRemove); } catch (UnsupportedOperationException optional) { } } assertEquals(initialSize, map.size()); assertTrue(map.containsKey(key)); assertInvariants(map); } public void testEntrySetRemoveNullKeyPresent() { if (!allowsNullKeys || !supportsPut || !supportsRemove) { return; } final Map map; final Set> entrySet; try { map = makeEitherMap(); } catch (UnsupportedOperationException e) { return; } assertInvariants(map); entrySet = map.entrySet(); final V unmappedValue; try { unmappedValue = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } map.put(null, unmappedValue); assertEquals(unmappedValue, map.get(null)); assertTrue(map.containsKey(null)); Entry entry = mapEntry(null, unmappedValue); assertTrue(entrySet.remove(entry)); assertNull(map.get(null)); assertTrue(!map.containsKey(null)); } public void testEntrySetRemoveNullKeyMissing() { final Map map; try { map = makeEitherMap(); } catch (UnsupportedOperationException e) { return; } Set> entrySet = map.entrySet(); Entry entry = mapEntry(null, getValueNotInPopulatedMap()); int initialSize = map.size(); if (supportsRemove) { boolean didRemove = entrySet.remove(entry); assertTrue(!didRemove); } else { try { boolean didRemove = entrySet.remove(entry); assertTrue(!didRemove); } catch (UnsupportedOperationException optional) { } } assertEquals(initialSize, map.size()); assertInvariants(map); } public void testEntrySetRemoveAll() { final Map map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } Set> entrySet = map.entrySet(); Set> entriesToRemove = singleton(entrySet.iterator().next()); if 
(supportsRemove) { int initialSize = map.size(); boolean didRemove = entrySet.removeAll(entriesToRemove); assertTrue(didRemove); assertEquals(initialSize - entriesToRemove.size(), map.size()); for (Entry entry : entriesToRemove) { assertTrue(!entrySet.contains(entry)); } } else { try { entrySet.removeAll(entriesToRemove); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testEntrySetRemoveAllNullFromEmpty() { final Map map; try { map = makeEmptyMap(); } catch (UnsupportedOperationException e) { return; } Set> entrySet = map.entrySet(); if (supportsRemove) { try { entrySet.removeAll(null); fail("Expected NullPointerException."); } catch (NullPointerException e) { // Expected. } } else { try { entrySet.removeAll(null); fail("Expected UnsupportedOperationException or NullPointerException."); } catch (UnsupportedOperationException e) { // Expected. } catch (NullPointerException e) { // Expected. } } assertInvariants(map); } public void testEntrySetRetainAll() { final Map map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } Set> entrySet = map.entrySet(); Set> entriesToRetain = singleton(entrySet.iterator().next()); if (supportsRemove) { boolean shouldRemove = (entrySet.size() > entriesToRetain.size()); boolean didRemove = entrySet.retainAll(entriesToRetain); assertEquals(shouldRemove, didRemove); assertEquals(entriesToRetain.size(), map.size()); for (Entry entry : entriesToRetain) { assertTrue(entrySet.contains(entry)); } } else { try { entrySet.retainAll(entriesToRetain); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testEntrySetRetainAllNullFromEmpty() { final Map map; try { map = makeEmptyMap(); } catch (UnsupportedOperationException e) { return; } Set> entrySet = map.entrySet(); if (supportsRemove) { try { entrySet.retainAll(null); // Returning successfully is not ideal, but tolerated. } catch (NullPointerException e) { // Expected. } } else { try { entrySet.retainAll(null); // We have to tolerate a successful return (Sun bug 4802647) } catch (UnsupportedOperationException e) { // Expected. } catch (NullPointerException e) { // Expected. } } assertInvariants(map); } public void testEntrySetClear() { final Map map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } Set> entrySet = map.entrySet(); if (supportsClear) { entrySet.clear(); assertTrue(entrySet.isEmpty()); } else { try { entrySet.clear(); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testEntrySetAddAndAddAll() { final Map map = makeEitherMap(); Set> entrySet = map.entrySet(); final Entry entryToAdd = mapEntry(null, null); try { entrySet.add(entryToAdd); fail("Expected UnsupportedOperationException or NullPointerException."); } catch (UnsupportedOperationException e) { // Expected. } catch (NullPointerException e) { // Expected. } assertInvariants(map); try { entrySet.addAll(singleton(entryToAdd)); fail("Expected UnsupportedOperationException or NullPointerException."); } catch (UnsupportedOperationException e) { // Expected. } catch (NullPointerException e) { // Expected. } assertInvariants(map); } public void testEntrySetSetValue() { // TODO: Investigate the extent to which, in practice, maps that support // put() also support Entry.setValue(). 
if (!supportsPut) { return; } final Map map; final V valueToSet; try { map = makePopulatedMap(); valueToSet = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } Set> entrySet = map.entrySet(); Entry entry = entrySet.iterator().next(); final V oldValue = entry.getValue(); final V returnedValue = entry.setValue(valueToSet); assertEquals(oldValue, returnedValue); assertTrue(entrySet.contains( mapEntry(entry.getKey(), valueToSet))); assertEquals(valueToSet, map.get(entry.getKey())); assertInvariants(map); } public void testEntrySetSetValueSameValue() { // TODO: Investigate the extent to which, in practice, maps that support // put() also support Entry.setValue(). if (!supportsPut) { return; } final Map map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } Set> entrySet = map.entrySet(); Entry entry = entrySet.iterator().next(); final V oldValue = entry.getValue(); final V returnedValue = entry.setValue(oldValue); assertEquals(oldValue, returnedValue); assertTrue(entrySet.contains( mapEntry(entry.getKey(), oldValue))); assertEquals(oldValue, map.get(entry.getKey())); assertInvariants(map); } public void testEqualsForEqualMap() { final Map map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } assertEquals(map, map); assertEquals(makePopulatedMap(), map); assertTrue(!map.equals(Collections.emptyMap())); //no-inspection ObjectEqualsNull assertTrue(!map.equals(null)); } public void testEqualsForLargerMap() { if (!supportsPut) { return; } final Map map; final Map largerMap; try { map = makePopulatedMap(); largerMap = makePopulatedMap(); largerMap.put(getKeyNotInPopulatedMap(), getValueNotInPopulatedMap()); } catch (UnsupportedOperationException e) { return; } assertTrue(!map.equals(largerMap)); } public void testEqualsForSmallerMap() { if (!supportsRemove) { return; } final Map map; final Map smallerMap; try { map = makePopulatedMap(); smallerMap = new LinkedHashMap(map); // smallerMap = makePopulatedMap(); smallerMap.remove(smallerMap.keySet().iterator().next()); } catch (UnsupportedOperationException e) { return; } assertTrue(!map.equals(smallerMap)); } public void testEqualsForEmptyMap() { final Map map; try { map = makeEmptyMap(); } catch (UnsupportedOperationException e) { return; } assertEquals(map, map); assertEquals(makeEmptyMap(), map); assertEquals(Collections.emptyMap(), map); assertTrue(!map.equals(Collections.emptySet())); //noinspection ObjectEqualsNull assertTrue(!map.equals(null)); } public void testGet() { final Map map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } for (Entry entry : map.entrySet()) { assertEquals(entry.getValue(), map.get(entry.getKey())); } K unmappedKey = null; try { unmappedKey = getKeyNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } assertNull(map.get(unmappedKey)); } public void testGetForEmptyMap() { final Map map; K unmappedKey = null; try { map = makeEmptyMap(); unmappedKey = getKeyNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } assertNull(map.get(unmappedKey)); } public void testGetNull() { Map map = makeEitherMap(); if (allowsNullKeys) { if (allowsNullValues) { // TODO: decide what to test here. 
} else { assertEquals(map.containsKey(null), map.get(null) != null); } } else { try { map.get(null); } catch (NullPointerException optional) { } } assertInvariants(map); } public void testHashCode() { final Map map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } assertInvariants(map); } public void testHashCodeForEmptyMap() { final Map map; try { map = makeEmptyMap(); } catch (UnsupportedOperationException e) { return; } assertInvariants(map); } public void testPutNewKey() { final Map map = makeEitherMap(); final K keyToPut; final V valueToPut; try { keyToPut = getKeyNotInPopulatedMap(); valueToPut = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } if (supportsPut) { int initialSize = map.size(); V oldValue = map.put(keyToPut, valueToPut); assertEquals(valueToPut, map.get(keyToPut)); assertTrue(map.containsKey(keyToPut)); assertTrue(map.containsValue(valueToPut)); assertEquals(initialSize + 1, map.size()); assertNull(oldValue); } else { try { map.put(keyToPut, valueToPut); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testPutExistingKey() { final Map map; final K keyToPut; final V valueToPut; try { map = makePopulatedMap(); valueToPut = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } keyToPut = map.keySet().iterator().next(); if (supportsPut) { int initialSize = map.size(); map.put(keyToPut, valueToPut); assertEquals(valueToPut, map.get(keyToPut)); assertTrue(map.containsKey(keyToPut)); assertTrue(map.containsValue(valueToPut)); assertEquals(initialSize, map.size()); } else { try { map.put(keyToPut, valueToPut); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testPutNullKey() { if (!supportsPut) { return; } final Map map = makeEitherMap(); final V valueToPut; try { valueToPut = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } if (allowsNullKeys) { final V oldValue = map.get(null); final V returnedValue = map.put(null, valueToPut); assertEquals(oldValue, returnedValue); assertEquals(valueToPut, map.get(null)); assertTrue(map.containsKey(null)); assertTrue(map.containsValue(valueToPut)); } else { try { map.put(null, valueToPut); fail("Expected RuntimeException"); } catch (RuntimeException e) { // Expected. } } assertInvariants(map); } public void testPutNullValue() { if (!supportsPut) { return; } final Map map = makeEitherMap(); final K keyToPut; try { keyToPut = getKeyNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } if (allowsNullValues) { int initialSize = map.size(); final V oldValue = map.get(keyToPut); final V returnedValue = map.put(keyToPut, null); assertEquals(oldValue, returnedValue); assertNull(map.get(keyToPut)); assertTrue(map.containsKey(keyToPut)); assertTrue(map.containsValue(null)); assertEquals(initialSize + 1, map.size()); } else { try { map.put(keyToPut, null); fail("Expected RuntimeException"); } catch (RuntimeException e) { // Expected. 
} } assertInvariants(map); } public void testPutNullValueForExistingKey() { if (!supportsPut) { return; } final Map map; final K keyToPut; try { map = makePopulatedMap(); keyToPut = map.keySet().iterator().next(); } catch (UnsupportedOperationException e) { return; } if (allowsNullValues) { int initialSize = map.size(); final V oldValue = map.get(keyToPut); final V returnedValue = map.put(keyToPut, null); assertEquals(oldValue, returnedValue); assertNull(map.get(keyToPut)); assertTrue(map.containsKey(keyToPut)); assertTrue(map.containsValue(null)); assertEquals(initialSize, map.size()); } else { try { map.put(keyToPut, null); fail("Expected RuntimeException"); } catch (RuntimeException e) { // Expected. } } assertInvariants(map); } public void testPutAllNewKey() { final Map map = makeEitherMap(); final K keyToPut; final V valueToPut; try { keyToPut = getKeyNotInPopulatedMap(); valueToPut = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } final Map mapToPut = Collections.singletonMap(keyToPut, valueToPut); if (supportsPut) { int initialSize = map.size(); map.putAll(mapToPut); assertEquals(valueToPut, map.get(keyToPut)); assertTrue(map.containsKey(keyToPut)); assertTrue(map.containsValue(valueToPut)); assertEquals(initialSize + 1, map.size()); } else { try { map.putAll(mapToPut); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testPutAllExistingKey() { final Map map; final K keyToPut; final V valueToPut; try { map = makePopulatedMap(); valueToPut = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } keyToPut = map.keySet().iterator().next(); final Map mapToPut = Collections.singletonMap(keyToPut, valueToPut); int initialSize = map.size(); if (supportsPut) { map.putAll(mapToPut); assertEquals(valueToPut, map.get(keyToPut)); assertTrue(map.containsKey(keyToPut)); assertTrue(map.containsValue(valueToPut)); } else { try { map.putAll(mapToPut); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertEquals(initialSize, map.size()); assertInvariants(map); } public void testRemove() { final Map map; final K keyToRemove; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } keyToRemove = map.keySet().iterator().next(); if (supportsRemove) { int initialSize = map.size(); V expectedValue = map.get(keyToRemove); V oldValue = map.remove(keyToRemove); assertEquals(expectedValue, oldValue); assertTrue(!map.containsKey(keyToRemove)); assertEquals(initialSize - 1, map.size()); } else { try { map.remove(keyToRemove); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testRemoveMissingKey() { final Map map; final K keyToRemove; try { map = makePopulatedMap(); keyToRemove = getKeyNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } if (supportsRemove) { int initialSize = map.size(); assertNull(map.remove(keyToRemove)); assertEquals(initialSize, map.size()); } else { try { map.remove(keyToRemove); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. 
} } assertInvariants(map); } public void testSize() { assertInvariants(makeEitherMap()); } public void testKeySetClear() { final Map map; try { map = makeEitherMap(); } catch (UnsupportedOperationException e) { return; } Set keySet = map.keySet(); if (supportsClear) { keySet.clear(); assertTrue(keySet.isEmpty()); } else { try { keySet.clear(); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testKeySetRemoveAllNullFromEmpty() { final Map map; try { map = makeEmptyMap(); } catch (UnsupportedOperationException e) { return; } Set keySet = map.keySet(); if (supportsRemove) { try { keySet.removeAll(null); fail("Expected NullPointerException."); } catch (NullPointerException e) { // Expected. } } else { try { keySet.removeAll(null); fail("Expected UnsupportedOperationException or NullPointerException."); } catch (UnsupportedOperationException e) { // Expected. } catch (NullPointerException e) { // Expected. } } assertInvariants(map); } public void testKeySetRetainAllNullFromEmpty() { final Map map; try { map = makeEmptyMap(); } catch (UnsupportedOperationException e) { return; } Set keySet = map.keySet(); if (supportsRemove) { try { keySet.retainAll(null); // Returning successfully is not ideal, but tolerated. } catch (NullPointerException e) { // Expected. } } else { try { keySet.retainAll(null); // We have to tolerate a successful return (Sun bug 4802647) } catch (UnsupportedOperationException e) { // Expected. } catch (NullPointerException e) { // Expected. } } assertInvariants(map); } public void testValues() { final Map map; final Collection valueCollection; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } assertInvariants(map); valueCollection = map.values(); final V unmappedValue; try { unmappedValue = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } for (V value : valueCollection) { assertTrue(!unmappedValue.equals(value)); } } public void testValuesIteratorRemove() { final Map map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } Collection valueCollection = map.values(); Iterator iterator = valueCollection.iterator(); if (supportsIteratorRemove) { int initialSize = map.size(); iterator.next(); iterator.remove(); assertEquals(initialSize - 1, map.size()); // (We can't assert that the values collection no longer contains the // removed value, because the underlying map can have multiple mappings // to the same value.) assertInvariants(map); try { iterator.remove(); fail("Expected IllegalStateException."); } catch (IllegalStateException e) { // Expected. } } else { try { iterator.next(); iterator.remove(); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testValuesRemove() { final Map map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } Collection valueCollection = map.values(); if (supportsRemove) { int initialSize = map.size(); valueCollection.remove(valueCollection.iterator().next()); assertEquals(initialSize - 1, map.size()); // (We can't assert that the values collection no longer contains the // removed value, because the underlying map can have multiple mappings // to the same value.) 
} else { try { valueCollection.remove(valueCollection.iterator().next()); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testValuesRemoveMissing() { final Map map; final V valueToRemove; try { map = makeEitherMap(); valueToRemove = getValueNotInPopulatedMap(); } catch (UnsupportedOperationException e) { return; } Collection valueCollection = map.values(); int initialSize = map.size(); if (supportsRemove) { assertTrue(!valueCollection.remove(valueToRemove)); } else { try { assertTrue(!valueCollection.remove(valueToRemove)); } catch (UnsupportedOperationException e) { // Tolerated. } } assertEquals(initialSize, map.size()); assertInvariants(map); } public void testValuesRemoveAll() { final Map map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } Collection valueCollection = map.values(); Set valuesToRemove = singleton(valueCollection.iterator().next()); if (supportsRemove) { valueCollection.removeAll(valuesToRemove); for (V value : valuesToRemove) { assertTrue(!valueCollection.contains(value)); } for (V value : valueCollection) { assertTrue(!valuesToRemove.contains(value)); } } else { try { valueCollection.removeAll(valuesToRemove); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testValuesRemoveAllNullFromEmpty() { final Map map; try { map = makeEmptyMap(); } catch (UnsupportedOperationException e) { return; } Collection values = map.values(); if (supportsRemove) { try { values.removeAll(null); // Returning successfully is not ideal, but tolerated. } catch (NullPointerException e) { // Expected. } } else { try { values.removeAll(null); // We have to tolerate a successful return (Sun bug 4802647) } catch (UnsupportedOperationException e) { // Expected. } catch (NullPointerException e) { // Expected. } } assertInvariants(map); } public void testValuesRetainAll() { final Map map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } Collection valueCollection = map.values(); Set valuesToRetain = singleton(valueCollection.iterator().next()); if (supportsRemove) { valueCollection.retainAll(valuesToRetain); for (V value : valuesToRetain) { assertTrue(valueCollection.contains(value)); } for (V value : valueCollection) { assertTrue(valuesToRetain.contains(value)); } } else { try { valueCollection.retainAll(valuesToRetain); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } public void testValuesRetainAllNullFromEmpty() { final Map map; try { map = makeEmptyMap(); } catch (UnsupportedOperationException e) { return; } Collection values = map.values(); if (supportsRemove) { try { values.retainAll(null); // Returning successfully is not ideal, but tolerated. } catch (NullPointerException e) { // Expected. } } else { try { values.retainAll(null); // We have to tolerate a successful return (Sun bug 4802647) } catch (UnsupportedOperationException e) { // Expected. } catch (NullPointerException e) { // Expected. 
} } assertInvariants(map); } public void testValuesClear() { final Map map; try { map = makePopulatedMap(); } catch (UnsupportedOperationException e) { return; } Collection valueCollection = map.values(); if (supportsClear) { valueCollection.clear(); assertTrue(valueCollection.isEmpty()); } else { try { valueCollection.clear(); fail("Expected UnsupportedOperationException."); } catch (UnsupportedOperationException e) { // Expected. } } assertInvariants(map); } private static Entry mapEntry(K key, V value) { return Collections.singletonMap(key, value).entrySet().iterator().next(); } } ================================================ FILE: src/test/java/org/apache/jdbm/ObjectOutputStream2Test.java ================================================ package org.apache.jdbm; import junit.framework.TestCase; import org.apache.jdbm.SerialClassInfoTest.*; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; public class ObjectOutputStream2Test extends TestCase { E neser(E e) throws IOException, ClassNotFoundException { ByteArrayOutputStream i = new ByteArrayOutputStream(); new ObjectOutputStream2(i).writeObject(e); return (E) new ObjectInputStream2(new ByteArrayInputStream(i.toByteArray())).readObject(); } public void testSimple() throws ClassNotFoundException, IOException { Bean1 b = new Bean1("qwe", "rty"); Bean1 b2 = neser(b); assertEquals(b, b2); } } ================================================ FILE: src/test/java/org/apache/jdbm/PageFileTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.File; /** * This class contains all Unit tests for {@link PageFile}. */ final public class PageFileTest extends TestCaseWithTestFile { public static void deleteFile(String filename) { File file = new File(filename); if (file.exists()) { try { file.delete(); } catch (Exception except) { except.printStackTrace(); } if (file.exists()) { System.out.println("WARNING: Cannot delete file: " + file); } } } /** * Test constructor */ public void testCtor() throws Exception { PageFile file = newRecordFile(); file.close(); } /** * Test addition of record 0 */ public void testAddZero() throws Exception { String f = newTestFile(); PageFile file = new PageFile(f); PageIo data = file.get(0); data.writeByte(14, (byte) 'b'); file.release(0, true); file.close(); file = new PageFile(f); data = file.get(0); assertEquals((byte) 'b', data.readByte(14)); file.release(0, false); file.close(); } /** * Test addition of a number of records, with holes. 
*/ public void testWithHoles() throws Exception { String f = newTestFile(); PageFile file = new PageFile(f); // Write recid 0, byte 0 with 'b' PageIo data = file.get(0); data.writeByte(0,(byte) 'b'); file.release(0, true); // Write recid 10, byte 10 with 'c' data = file.get(10); data.writeByte(10, (byte) 'c'); file.release(10, true); // Write recid 5, byte 5 with 'e' data = file.get(5); data.writeByte(5, (byte) 'e'); file.release(5, false); file.close(); file = new PageFile(f); data = file.get(0); assertEquals("0 = b", (byte) 'b', data.readByte(0)); file.release(0, false); data = file.get(5); assertEquals("5 = 0", (byte) 'e', data.readByte(5)); file.release(5, false); data = file.get(10); assertEquals("10 = c", (byte) 'c', data.readByte(10)); file.release(10, false); file.close(); } /** * Test wrong release */ public void testWrongRelease() throws Exception { PageFile file = newRecordFile(); // Write recid 0, byte 0 with 'b' PageIo data = file.get(0); data.writeByte(0, (byte) 'b'); try { file.release(1, true); fail("expected exception"); } catch (NullPointerException except) { // ignore } file.release(0, false); file.close(); // @alex retry to open the file /* file = new PageFile( testFileName ); PageManager pm = new PageManager( file ); pm.close(); file.close(); */ } } ================================================ FILE: src/test/java/org/apache/jdbm/PageIoTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import junit.framework.TestCase; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; import java.io.DataOutputStream; import java.nio.ByteBuffer; /** * This class contains all Unit tests for {@link PageIo}. 
*/ public class PageIoTest extends TestCase { private static final short SHORT_VALUE = 0x1234; private static final int INT_VALUE = 0xe7b3c8a1; private static final long LONG_VALUE = 0xfdebca9876543210L; private static final long LONG_VALUE2 = 1231290495545446485L; /** * Test writing */ public void testWrite() throws Exception { byte[] data = new byte[100]; PageIo test = new PageIo(0, data); test.writeShort(0, SHORT_VALUE); test.writeLong(2, LONG_VALUE); test.writeInt(10, INT_VALUE); test.writeLong(14, LONG_VALUE2); DataInputStream is = new DataInputStream(new ByteArrayInputStream(data)); assertEquals("short", SHORT_VALUE, is.readShort()); assertEquals("long", LONG_VALUE, is.readLong()); assertEquals("int", INT_VALUE, is.readInt()); assertEquals("long", LONG_VALUE2, is.readLong()); assertEquals("short", SHORT_VALUE, test.readShort(0)); assertEquals("long", LONG_VALUE, test.readLong(2)); assertEquals("int", INT_VALUE, test.readInt(10)); assertEquals("long", LONG_VALUE2, test.readLong(14)); } /** * Test reading */ public void testRead() throws Exception { ByteArrayOutputStream bos = new ByteArrayOutputStream(100); DataOutputStream os = new DataOutputStream(bos); os.writeShort(SHORT_VALUE); os.writeLong(LONG_VALUE); os.writeInt(INT_VALUE); os.writeLong(LONG_VALUE2); byte[] data = bos.toByteArray(); PageIo test = new PageIo(0, data); assertEquals("short", SHORT_VALUE, test.readShort(0)); assertEquals("long", LONG_VALUE, test.readLong(2)); assertEquals("int", INT_VALUE, test.readInt(10)); assertEquals("long", LONG_VALUE2, test.readLong(14)); } public void testNegativeSixByte(){ PageIo t = new PageIo(0, ByteBuffer.allocate(Storage.PAGE_SIZE)); t.writeSixByteLong(0,-11111); assertEquals(-11111,t.readSixByteLong(0)); t.writeSixByteLong(0,11111); assertEquals(11111,t.readSixByteLong(0)); } public void testPageHeaderSetWriteRead() throws Exception { PageIo data = new PageIo(0, new byte[Storage.PAGE_SIZE]); data.writeShort(0, Magic.PAGE_MAGIC); data.pageHeaderSetNext(10); data.pageHeaderSetPrev(33); assertEquals("next", 10, data.pageHeaderGetNext()); assertEquals("prev", 33, data.pageHeaderGetPrev()); } } ================================================ FILE: src/test/java/org/apache/jdbm/PageManagerTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; /** * This class contains all Unit tests for {@link PageManager}. */ public class PageManagerTest extends TestCaseWithTestFile { /** * Test constructor */ public void testCtor() throws Exception { PageFile f = newRecordFile(); PageManager pm = new PageManager(f); f.forceClose(); } /** * Test allocations on a single list. 
*/ public void testAllocSingleList() throws Exception { String file = newTestFile(); PageFile f = new PageFile(file); PageManager pm = new PageManager(f); for (int i = 0; i < 100; i++) { assertEquals("allocate ", (long) i + 1, pm.allocate(Magic.USED_PAGE)); } pm.close(); f.close(); f = new PageFile(file); pm = new PageManager(f); long i = 1; for (long cur = pm.getFirst(Magic.USED_PAGE); cur != 0; cur = pm.getNext(cur)) { assertEquals("next", i++, cur); if (i > 120) fail("list structure not ok"); } assertEquals("total", 101, i); pm.close(); f.close(); } } ================================================ FILE: src/test/java/org/apache/jdbm/PageTransactionManagerTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.File; /** * This class contains all Unit tests for {@link PageTransactionManager}. * TODO sort out this testcase */ public class PageTransactionManagerTest extends TestCaseWithTestFile { String file = newTestFile(); /** * Test constructor. Oops - can only be done indirectly :-) */ public void testCtor() throws Exception { PageFile file2 = new PageFile(file); file2.forceClose(); } /** * Test recovery */ public void XtestRecovery() throws Exception { PageFile file1 = new PageFile(file); // Do three transactions. for (int i = 0; i < 3; i++) { PageIo node = file1.get(i); node.setDirty(); file1.release(node); file1.commit(); } assertDataSizeEquals("len1", 0); assertLogSizeNotZero("len1"); file1.forceClose(); // Leave the old record file in flux, and open it again. // The second instance should start recovery. PageFile file2 = new PageFile(file); assertDataSizeEquals("len2", 3 * Storage.PAGE_SIZE); assertLogSizeEquals("len2", 8); file2.forceClose(); // assure we can recover this log file PageFile file3 = new PageFile(file); file3.forceClose(); } /** * Test background synching */ public void XtestSynching() throws Exception { PageFile file1 = new PageFile(file); // Do enough transactions to fill the first slot int txnCount = 1; for (int i = 0; i < txnCount; i++) { PageIo node = file1.get(i); node.setDirty(); file1.release(node); file1.commit(); } file1.forceClose(); // The data file now has the first slotfull assertDataSizeEquals("len1", 1 * Storage.PAGE_SIZE + 6); assertLogSizeNotZero("len1"); // Leave the old record file in flux, and open it again. // The second instance should start recovery. 
PageFile file2 = new PageFile(file); assertDataSizeEquals("len2", txnCount * Storage.PAGE_SIZE); assertLogSizeEquals("len2", 8); file2.forceClose(); } // Helpers void assertDataSizeEquals(String msg, long size) { assertEquals(msg + " data size", size, new File(file + ".t").length()); } void assertLogSizeEquals(String msg, long size) { assertEquals(msg + " log size", size, new File(file + StorageDisk.transaction_log_file_extension).length()); } void assertLogSizeNotZero(String msg) { assertTrue(msg + " log size", new File(file + StorageDisk.transaction_log_file_extension).length() != 0); } } ================================================ FILE: src/test/java/org/apache/jdbm/PhysicalFreeRowIdManagerTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.IOException; /** * This class contains all Unit tests for {@link PhysicalFreeRowIdManager}. */ public class PhysicalFreeRowIdManagerTest extends TestCaseWithTestFile { /** * Test constructor */ public void testCtor() throws Exception { PageFile f = newRecordFile(); PageManager pm = new PageManager(f); PhysicalFreeRowIdManager freeMgr = new PhysicalFreeRowIdManager( f, pm); pm.close(); f.close(); } /** * Test basics */ public void testBasics() throws Exception { PageFile f = newRecordFile(); PageManager pm = new PageManager(f); PhysicalFreeRowIdManager freeMgr = new PhysicalFreeRowIdManager(f, pm); // allocate 10,000 bytes - should fail on an empty file. 
long loc = freeMgr.getFreeRecord(10000); assertTrue("loc is not null?", loc == 0); pm.close(); f.close(); } public void testPhysRecRootPage() throws IOException { PageFile f = newRecordFile(); PageManager pm = new PageManager(f); long pageid = pm.allocate(Magic.FREEPHYSIDS_ROOT_PAGE); PageIo p = f.get(pageid); p.writeInt(100,100); f.release(p); pm.commit(); f.commit(); p = f.get(pageid); assertEquals(p.readInt(100),100); } public void test_size_to_root_offset(){ for(int i = 1;iStorage.PAGE_SIZE - Magic.DATA_PAGE_O_DATA) newSize = newSize - (Storage.PAGE_SIZE - Magic.DATA_PAGE_O_DATA); assertEquals(listRecords(pm), arrayList(3000, -newSize, 1000)); } /** return list of records in pageman, negative numbers are free records*/ List listRecords(PageManager pageman) throws IOException { int pos = Magic.DATA_PAGE_O_DATA; List ret =new ArrayList(); for( long pageid = pageman.getFirst(Magic.USED_PAGE); pageid!=0; pageid = pageman.getNext(pageid)){ PageIo page = pageman.file.get(pageid); while(pos < Storage.PAGE_SIZE -RecordHeader.SIZE){ int size = RecordHeader.getAvailableSize(page, (short) pos); if(size == 0) break; int currSize =RecordHeader.getCurrentSize(page, (short) pos); pos+=size+RecordHeader.SIZE; if(currSize==0) size = -size; ret.add(size); } pos = pos +Magic.DATA_PAGE_O_DATA - Storage.PAGE_SIZE; pageman.file.release(page); } return ret; } List arrayList(Integer... args){ ArrayList ret = new ArrayList(); for(Integer i:args) ret.add(i); return ret; } } ================================================ FILE: src/test/java/org/apache/jdbm/RecordHeaderTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import junit.framework.TestCase; import java.util.Random; /** * This class contains all Unit tests for {@link RecordHeader}. 
*/ public class RecordHeaderTest extends TestCase { /** * Test basics - read and write at an offset */ public void testReadWrite() throws Exception { byte[] data = new byte[Storage.PAGE_SIZE]; PageIo test = new PageIo(0, data); //RecordHeader hdr = new RecordHeader(test, (short) 6); RecordHeader.setAvailableSize(test, (short) 6, 2345); RecordHeader.setCurrentSize(test, (short) 6, 2300); assertEquals("current size", 2300, RecordHeader.getCurrentSize(test, (short) 6)); assertEquals("available size", 2345, RecordHeader.getAvailableSize(test, (short) 6)); } public void testRecordSize() { System.out.println("MAX_RECORD_SIZE " + RecordHeader.MAX_RECORD_SIZE); assertEquals("inconsistent rounding at max rec size", RecordHeader.MAX_RECORD_SIZE, RecordHeader.roundAvailableSize(RecordHeader.MAX_RECORD_SIZE)); byte[] data = new byte[Storage.PAGE_SIZE]; PageIo test = new PageIo(0, data); Random r = new Random(); //RecordHeader hdr = new RecordHeader(test, (short) 6); for (int size = 2; size <= RecordHeader.MAX_RECORD_SIZE; size++) { //set size int currSize = size; int availSize = RecordHeader.roundAvailableSize(currSize); assertTrue(availSize - currSize < RecordHeader.MAX_SIZE_SPACE); assertTrue(currSize <= availSize); assertEquals("size rounding function does not provide consistent results " + availSize, availSize, RecordHeader.roundAvailableSize(availSize)); //make sure it writes and reads back correctly RecordHeader.setAvailableSize(test, (short) 6, availSize); assertEquals("available size", availSize, RecordHeader.getAvailableSize(test, (short) 6)); RecordHeader.setCurrentSize(test, (short) 6, currSize); assertEquals("current size", currSize, RecordHeader.getCurrentSize(test, (short) 6)); //try random size within given offset int newCurrSize = availSize - r.nextInt(RecordHeader.MAX_SIZE_SPACE); if (newCurrSize < 0) newCurrSize = 0; RecordHeader.setCurrentSize(test, (short) 6, newCurrSize); assertEquals("current size", newCurrSize, RecordHeader.getCurrentSize(test, (short) 6)); RecordHeader.setCurrentSize(test, (short) 6, 0); size++; // comment out next line to do full test if (size > 1e6) size = (int) (size * 1.01); } } public void testMaxRecordSize() { long max = 0; for (int i = 0; i < 1e7; i++) { int deconverted = RecordHeader.deconvertAvailSize(RecordHeader.convertAvailSize(i)); if (i == deconverted) { max = i; } } assertEquals("Maximal record size does not match the calculated one: " + max, max, RecordHeader.MAX_RECORD_SIZE); } public void testRoundingSmall() { for (int i = 0; i <= Short.MAX_VALUE; i++) { assertEquals(i, RecordHeader.convertAvailSize(i)); } } public void testRounding() { for (int i = 0; i < RecordHeader.MAX_RECORD_SIZE; i++) { int deconverted = RecordHeader.deconvertAvailSize(RecordHeader.convertAvailSize(i)); assertTrue("deconverted size is smaller than actual: " + i + " versus " + deconverted, deconverted >= i); } } public void testSetCurrentSize() { PageIo b = new PageIo(4l, new byte[Storage.PAGE_SIZE]); short pos = 10; RecordHeader.setAvailableSize(b, pos, 1000); assertEquals(1000, RecordHeader.getAvailableSize(b, pos)); RecordHeader.setCurrentSize(b, pos, 900); assertEquals(900, RecordHeader.getCurrentSize(b, pos)); RecordHeader.setCurrentSize(b, pos, 0); assertEquals(0, RecordHeader.getCurrentSize(b, pos)); RecordHeader.setCurrentSize(b, pos, 1000 - 254); assertEquals(1000 - 254, RecordHeader.getCurrentSize(b, pos)); short pos2 = 20; RecordHeader.setAvailableSize(b, pos2, 10000); assertEquals(10000, RecordHeader.getAvailableSize(b, pos2)); RecordHeader.setCurrentSize(b, 
pos2, 10000); assertEquals(10000, RecordHeader.getCurrentSize(b, pos2)); } } ================================================ FILE: src/test/java/org/apache/jdbm/RollbackTest.java ================================================ package org.apache.jdbm; import java.io.IOException; import java.util.List; import java.util.Map; import java.util.Set; public class RollbackTest extends TestCaseWithTestFile{ public void test_treemap() throws IOException { DB db = newDBCache(); Map map = db.createTreeMap("collectionName"); map.put(1, "one"); map.put(2, "two"); assertEquals(2, map.size()); db.commit(); //persist changes into disk map.put(3, "three"); assertEquals(3, map.size()); db.rollback(); //revert recent changes assertEquals(2, map.size()); } public void test_hashmap() throws IOException { DB db = newDBCache(); Map map = db.createHashMap("collectionName"); map.put(1, "one"); map.put(2, "two"); assertEquals(2, map.size()); db.commit(); //persist changes into disk map.put(3, "three"); assertEquals(3, map.size()); db.rollback(); //revert recent changes assertEquals(2, map.size()); } public void test_treeset() throws IOException { DB db = newDBCache(); Set c = db.createTreeSet("collectionName"); c.add(1); c.add(2); assertEquals(2, c.size()); db.commit(); //persist changes into disk c.add(3); assertEquals(3, c.size()); db.rollback(); //revert recent changes assertEquals(2, c.size()); } public void test_hashset() throws IOException { DB db = newDBCache(); Set c = db.createHashSet("collectionName"); c.add(1); c.add(2); assertEquals(2, c.size()); db.commit(); //persist changes into disk c.add(3); assertEquals(3, c.size()); db.rollback(); //revert recent changes assertEquals(2, c.size()); } public void test_linkedlist() throws IOException { DB db = newDBCache(); List c = db.createLinkedList("collectionName"); c.add(1); c.add(2); assertEquals(2, c.size()); db.commit(); //persist changes into disk c.add(3); assertEquals(3, c.size()); db.rollback(); //revert recent changes assertEquals(2, c.size()); } } ================================================ FILE: src/test/java/org/apache/jdbm/SerialClassInfoTest.java ================================================ package org.apache.jdbm; import java.io.*; import java.util.AbstractMap; import java.util.ArrayList; public class SerialClassInfoTest extends TestCaseWithTestFile { static class Bean1 implements Serializable { public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Bean1 bean1 = (Bean1) o; if (Double.compare(bean1.doubleField, doubleField) != 0) return false; if (Float.compare(bean1.floatField, floatField) != 0) return false; if (intField != bean1.intField) return false; if (longField != bean1.longField) return false; if (field1 != null ? !field1.equals(bean1.field1) : bean1.field1 != null) return false; if (field2 != null ? 
!field2.equals(bean1.field2) : bean1.field2 != null) return false; return true; } protected String field1 = null; protected String field2 = null; protected int intField = Integer.MAX_VALUE; protected long longField = Long.MAX_VALUE; protected double doubleField = Double.MAX_VALUE; protected float floatField = Float.MAX_VALUE; transient int getCalled = 0; transient int setCalled = 0; public String getField2() { getCalled++; return field2; } public void setField2(String field2) { setCalled++; this.field2 = field2; } Bean1(String field1, String field2) { this.field1 = field1; this.field2 = field2; } Bean1() { } } static class Bean2 extends Bean1 { public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; if (!super.equals(o)) return false; Bean2 bean2 = (Bean2) o; if (field3 != null ? !field3.equals(bean2.field3) : bean2.field3 != null) return false; return true; } @Override public int hashCode() { return field3 != null ? field3.hashCode() : 0; } private String field3 = null; Bean2(String field1, String field2, String field3) { super(field1, field2); this.field3 = field3; } Bean2() { } } SerialClassInfo s; public void setUp() throws IOException { s = new Serialization(); } Bean1 b = new Bean1("aa", "bb"); Bean2 b2 = new Bean2("aa", "bb", "cc"); public void testGetFieldValue1() throws Exception { assertEquals("aa", s.getFieldValue("field1", b)); } public void testGetFieldValue2() throws Exception { assertEquals("bb", s.getFieldValue("field2", b)); assertEquals(1, b.getCalled); } public void testGetFieldValue3() throws Exception { assertEquals("aa", s.getFieldValue("field1", b2)); } public void testGetFieldValue4() throws Exception { assertEquals("bb", s.getFieldValue("field2", b2)); assertEquals(1, b2.getCalled); } public void testGetFieldValue5() throws Exception { assertEquals("cc", s.getFieldValue("field3", b2)); } public void testSetFieldValue1() { s.setFieldValue("field1", b, "zz"); assertEquals("zz", b.field1); } public void testSetFieldValue2() { s.setFieldValue("field2", b, "zz"); assertEquals("zz", b.field2); assertEquals(1, b.setCalled); } public void testSetFieldValue3() { s.setFieldValue("field1", b2, "zz"); assertEquals("zz", b2.field1); } public void testSetFieldValue4() { s.setFieldValue("field2", b2, "zz"); assertEquals("zz", b2.field2); assertEquals(1, b2.setCalled); } public void testSetFieldValue5() { s.setFieldValue("field3", b2, "zz"); assertEquals("zz", b2.field3); } public void testGetPrimitiveField() { assertEquals(Integer.MAX_VALUE, s.getFieldValue("intField", b2)); assertEquals(Long.MAX_VALUE, s.getFieldValue("longField", b2)); assertEquals(Double.MAX_VALUE, s.getFieldValue("doubleField", b2)); assertEquals(Float.MAX_VALUE, s.getFieldValue("floatField", b2)); } public void testSetPrimitiveField() { s.setFieldValue("intField", b2, -1); assertEquals(-1, s.getFieldValue("intField", b2)); s.setFieldValue("longField", b2, -1L); assertEquals(-1L, s.getFieldValue("longField", b2)); s.setFieldValue("doubleField", b2, -1D); assertEquals(-1D, s.getFieldValue("doubleField", b2)); s.setFieldValue("floatField", b2, -1F); assertEquals(-1F, s.getFieldValue("floatField", b2)); } E serialize(E e) throws ClassNotFoundException, IOException { Serialization s2 = new Serialization(); ByteArrayOutputStream out = new ByteArrayOutputStream(); s2.serialize(new DataOutputStream(out), e); ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray()); return (E) s2.deserialize(new DataInputStream(in)); } public void 
testSerializable() throws Exception { assertEquals(serialize(b), b); } public void testRecursion() throws Exception { AbstractMap.SimpleEntry b = new AbstractMap.SimpleEntry("abcd", null); b.setValue(b.getKey()); AbstractMap.SimpleEntry bx = serialize(b); assertEquals(bx, b); assert (bx.getKey() == bx.getValue()); } public void testRecursion2() throws Exception { AbstractMap.SimpleEntry b = new AbstractMap.SimpleEntry("abcd", null); b.setValue(b); AbstractMap.SimpleEntry bx = serialize(b); assertTrue(bx == bx.getValue()); assertEquals(bx.getKey(), "abcd"); } public void testRecursion3() throws Exception { ArrayList l = new ArrayList(); l.add("123"); l.add(l); ArrayList l2 = serialize(l); assertTrue(l.size() == 2); assertEquals(l.get(0), "123"); assertTrue(l.get(1) == l); } public void testPersistedSimple() throws Exception { String f = newTestFile(); DBAbstract r1 = (DBAbstract) DBMaker.openFile(f).make(); long recid = r1.insert("AA"); r1.commit(); r1.close(); DBAbstract r2 = (DBAbstract) DBMaker.openFile(f).make(); String a2 = r2.fetch(recid); r2.close(); assertEquals("AA", a2); } public void testPersisted() throws Exception { Bean1 b1 = new Bean1("abc", "dcd"); String f = newTestFile(); DBAbstract r1 = (DBAbstract) DBMaker.openFile(f).make() ; long recid = r1.insert(b1); r1.commit(); r1.close(); DBAbstract r2 = (DBAbstract) DBMaker.openFile(f).make(); Bean1 b2 = (Bean1) r2.fetch(recid); r2.close(); assertEquals(b1, b2); } } ================================================ FILE: src/test/java/org/apache/jdbm/Serialization2Bean.java ================================================ package org.apache.jdbm; import java.io.Serializable; public class Serialization2Bean implements Serializable { // =========================== Constants =============================== private static final long serialVersionUID = 2757814409580877461L; // =========================== Attributes ============================== private String id = "test"; private String f1 = ""; private String f2 = ""; private String f3 = null; private String f4 = ""; private String f5 = null; private String f6 = ""; @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((f1 == null) ? 0 : f1.hashCode()); result = prime * result + ((f2 == null) ? 0 : f2.hashCode()); result = prime * result + ((f3 == null) ? 0 : f3.hashCode()); result = prime * result + ((f4 == null) ? 0 : f4.hashCode()); result = prime * result + ((f5 == null) ? 0 : f5.hashCode()); result = prime * result + ((f6 == null) ? 0 : f6.hashCode()); result = prime * result + ((id == null) ? 
0 : id.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (getClass() != obj.getClass()) { return false; } Serialization2Bean other = (Serialization2Bean) obj; if (f1 == null) { if (other.f1 != null) { return false; } } else if (!f1.equals(other.f1)) { return false; } if (f2 == null) { if (other.f2 != null) { return false; } } else if (!f2.equals(other.f2)) { return false; } if (f3 == null) { if (other.f3 != null) { return false; } } else if (!f3.equals(other.f3)) { return false; } if (f4 == null) { if (other.f4 != null) { return false; } } else if (!f4.equals(other.f4)) { return false; } if (f5 == null) { if (other.f5 != null) { return false; } } else if (!f5.equals(other.f5)) { return false; } if (f6 == null) { if (other.f6 != null) { return false; } } else if (!f6.equals(other.f6)) { return false; } if (id == null) { if (other.id != null) { return false; } } else if (!id.equals(other.id)) { return false; } return true; } } ================================================ FILE: src/test/java/org/apache/jdbm/Serialization2Test.java ================================================ package org.apache.jdbm; import java.io.IOException; import java.io.Serializable; import java.util.Map; public class Serialization2Test extends TestCaseWithTestFile { public void test2() throws IOException { DB db = newDBNoCache(); Serialization2Bean processView = new Serialization2Bean(); Map map = db.createHashMap("test2"); map.put("abc", processView); db.commit(); Serialization2Bean retProcessView = (Serialization2Bean)map.get("abc"); assertEquals(processView, retProcessView); db.close(); } public void test3() throws IOException { String file = newTestFile(); Serialized2DerivedBean att = new Serialized2DerivedBean(); DB db = DBMaker.openFile(file).disableCache().make(); Map map = db.createHashMap("test"); map.put("att", att); db.commit(); db.close(); db = DBMaker.openFile(file).disableCache().make(); map = db.getHashMap("test"); Serialized2DerivedBean retAtt = (Serialized2DerivedBean) map.get("att"); assertEquals(att, retAtt); } static class AAA implements Serializable { String test = "aa"; } public void testReopenWithDefrag(){ String f = newTestFile(); DB db = DBMaker.openFile(f) .disableTransactions() .make(); Map map = db.createTreeMap("test"); map.put(1,new AAA()); db.defrag(true); db.close(); db = DBMaker.openFile(f) .disableTransactions() .make(); map = db.getTreeMap("test"); assertNotNull(map.get(1)); assertEquals(map.get(1).test, "aa"); db.close(); } } ================================================ FILE: src/test/java/org/apache/jdbm/SerializationHeaderTest.java ================================================ package org.apache.jdbm; import junit.framework.TestCase; import java.lang.reflect.Field; import java.util.Set; import java.util.TreeSet; public class SerializationHeaderTest extends TestCase { public void testUnique() throws IllegalAccessException { Class c = SerializationHeader.class; Set s = new TreeSet(); for (Field f : c.getDeclaredFields()) { f.setAccessible(true); int value = f.getInt(null); assertTrue("Value already used: " + value, !s.contains(value)); s.add(value); } assertTrue(!s.isEmpty()); } } ================================================ FILE: src/test/java/org/apache/jdbm/SerializationTest.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * 
Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import junit.framework.TestCase; import java.io.*; import java.math.BigDecimal; import java.math.BigInteger; import java.util.AbstractMap.SimpleEntry; import java.util.*; import static java.util.Arrays.asList; @SuppressWarnings("unchecked") public class SerializationTest extends TestCase { Serialization ser; public SerializationTest() throws IOException { ser = new Serialization(); } public void testInt() throws IOException, ClassNotFoundException { int[] vals = { Integer.MIN_VALUE, -Short.MIN_VALUE * 2, -Short.MIN_VALUE + 1, -Short.MIN_VALUE, -10, -9, -8, -7, -6, -5, -4, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 127, 254, 255, 256, Short.MAX_VALUE, Short.MAX_VALUE + 1, Short.MAX_VALUE * 2, Integer.MAX_VALUE }; for (int i : vals) { byte[] buf = ser.serialize(i); Object l2 = ser.deserialize(buf); assertTrue(l2.getClass() == Integer.class); assertEquals(l2, i); } } public void testShort() throws IOException, ClassNotFoundException { short[] vals = { (short) (-Short.MIN_VALUE + 1), (short) -Short.MIN_VALUE, -10, -9, -8, -7, -6, -5, -4, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 127, 254, 255, 256, Short.MAX_VALUE, Short.MAX_VALUE - 1, Short.MAX_VALUE }; for (short i : vals) { byte[] buf = ser.serialize(i); Object l2 = ser.deserialize(buf); assertTrue(l2.getClass() == Short.class); assertEquals(l2, i); } } public void testDouble() throws IOException, ClassNotFoundException { double[] vals = { 1f, 0f, -1f, Math.PI, 255, 256, Short.MAX_VALUE, Short.MAX_VALUE + 1, -100 }; for (double i : vals) { byte[] buf = ser.serialize(i); Object l2 = ser.deserialize(buf); assertTrue(l2.getClass() == Double.class); assertEquals(l2, i); } } public void testFloat() throws IOException, ClassNotFoundException { float[] vals = { 1f, 0f, -1f, (float) Math.PI, 255, 256, Short.MAX_VALUE, Short.MAX_VALUE + 1, -100 }; for (float i : vals) { byte[] buf = ser.serialize(i); Object l2 = ser.deserialize(buf); assertTrue(l2.getClass() == Float.class); assertEquals(l2, i); } } public void testChar() throws IOException, ClassNotFoundException { char[] vals = { 'a', ' ' }; for (char i : vals) { byte[] buf = ser.serialize(i); Object l2 = ser.deserialize(buf); assertTrue(l2.getClass() == Character.class); assertEquals(l2, i); } } public void testLong() throws IOException, ClassNotFoundException { long[] vals = { Long.MIN_VALUE, Integer.MIN_VALUE, Integer.MIN_VALUE - 1, Integer.MIN_VALUE + 1, -Short.MIN_VALUE * 2, -Short.MIN_VALUE + 1, -Short.MIN_VALUE, -10, -9, -8, -7, -6, -5, -4, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 127, 254, 255, 256, Short.MAX_VALUE, Short.MAX_VALUE + 1, Short.MAX_VALUE * 2, Integer.MAX_VALUE, Integer.MAX_VALUE + 1, Long.MAX_VALUE }; for (long i : vals) { byte[] buf = ser.serialize(i); Object l2 = ser.deserialize(buf); assertTrue(l2.getClass() == Long.class); assertEquals(l2, i); } } public void testBoolean1() throws IOException, ClassNotFoundException { byte[] buf = ser.serialize(true); 
Object l2 = ser.deserialize(buf); assertTrue(l2.getClass() == Boolean.class); assertEquals(l2, true); byte[] buf2 = ser.serialize(false); Object l22 = ser.deserialize(buf2); assertTrue(l22.getClass() == Boolean.class); assertEquals(l22, false); } public void testString() throws IOException, ClassNotFoundException { byte[] buf = ser.serialize("Abcd"); String l2 = (String) ser.deserialize(buf); assertEquals(l2, "Abcd"); } public void testBigString() throws IOException, ClassNotFoundException { String bigString = ""; for (int i = 0; i < 1e4; i++) bigString += i % 10; byte[] buf = ser.serialize(bigString); String l2 = (String) ser.deserialize(buf); assertEquals(l2, bigString); } public void testObject() throws ClassNotFoundException, IOException { SimpleEntry a = new SimpleEntry(1, "11"); byte[] buf = ser.serialize(a); SimpleEntry l2 = (SimpleEntry) ser.deserialize(buf); assertEquals(l2, a); } public void testNoArgumentConstructorInJavaSerialization() throws ClassNotFoundException, IOException { SimpleEntry a = new SimpleEntry(1, "11"); ByteArrayOutputStream out = new ByteArrayOutputStream(); new ObjectOutputStream(out).writeObject(a); ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(out.toByteArray())); SimpleEntry a2 = (SimpleEntry) in.readObject(); assertEquals(a, a2); } public void testArrayList() throws ClassNotFoundException, IOException { Collection c = new ArrayList(); for (int i = 0; i < 200; i++) c.add(i); assertEquals(c, ser.deserialize(ser.serialize(c))); for (int i = 0; i < 2000; i++) c.add(i); assertEquals(c, ser.deserialize(ser.serialize(c))); } public void testLinkedList() throws ClassNotFoundException, IOException { Collection c = new java.util.LinkedList(); for (int i = 0; i < 200; i++) c.add(i); assertEquals(c, ser.deserialize(ser.serialize(c))); for (int i = 0; i < 2000; i++) c.add(i); assertEquals(c, ser.deserialize(ser.serialize(c))); } public void testVector() throws ClassNotFoundException, IOException { Collection c = new Vector(); for (int i = 0; i < 200; i++) c.add(i); assertEquals(c, ser.deserialize(ser.serialize(c))); for (int i = 0; i < 2000; i++) c.add(i); assertEquals(c, ser.deserialize(ser.serialize(c))); } public void testTreeSet() throws ClassNotFoundException, IOException { Collection c = new TreeSet(); for (int i = 0; i < 200; i++) c.add(i); assertEquals(c, ser.deserialize(ser.serialize(c))); for (int i = 0; i < 2000; i++) c.add(i); assertEquals(c, ser.deserialize(ser.serialize(c))); } public void testHashSet() throws ClassNotFoundException, IOException { Collection c = new HashSet(); for (int i = 0; i < 200; i++) c.add(i); assertEquals(c, ser.deserialize(ser.serialize(c))); for (int i = 0; i < 2000; i++) c.add(i); assertEquals(c, ser.deserialize(ser.serialize(c))); } public void testLinkedHashSet() throws ClassNotFoundException, IOException { Collection c = new LinkedHashSet(); for (int i = 0; i < 200; i++) c.add(i); assertEquals(c, ser.deserialize(ser.serialize(c))); for (int i = 0; i < 2000; i++) c.add(i); assertEquals(c, ser.deserialize(ser.serialize(c))); } public void testHashMap() throws ClassNotFoundException, IOException { Map c = new HashMap(); for (int i = 0; i < 200; i++) c.put(i, i + 10000); assertEquals(c, ser.deserialize(ser.serialize(c))); for (int i = 0; i < 2000; i++) c.put(i, i + 10000); assertEquals(c, ser.deserialize(ser.serialize(c))); } public void testTreeMap() throws ClassNotFoundException, IOException { Map c = new TreeMap(); for (int i = 0; i < 200; i++) c.put(i, i + 10000); assertEquals(c, 
ser.deserialize(ser.serialize(c))); for (int i = 0; i < 2000; i++) c.put(i, i + 10000); assertEquals(c, ser.deserialize(ser.serialize(c))); } public void testLinkedHashMap() throws ClassNotFoundException, IOException { Map c = new LinkedHashMap(); for (int i = 0; i < 200; i++) c.put(i, i + 10000); assertEquals(c, ser.deserialize(ser.serialize(c))); for (int i = 0; i < 2000; i++) c.put(i, i + 10000); assertEquals(c, ser.deserialize(ser.serialize(c))); } public void testHashtable() throws ClassNotFoundException, IOException { Map c = new Hashtable(); for (int i = 0; i < 200; i++) c.put(i, i + 10000); assertEquals(c, ser.deserialize(ser.serialize(c))); for (int i = 0; i < 2000; i++) c.put(i, i + 10000); assertEquals(c, ser.deserialize(ser.serialize(c))); } public void testProperties() throws ClassNotFoundException, IOException { Properties c = new Properties(); for (int i = 0; i < 200; i++) c.put(i, i + 10000); assertEquals(c, ser.deserialize(ser.serialize(c))); for (int i = 0; i < 2000; i++) c.put(i, i + 10000); assertEquals(c, ser.deserialize(ser.serialize(c))); } public void testClass() throws IOException, ClassNotFoundException { byte[] buf = ser.serialize(String.class); Class l2 = (Class) ser.deserialize(buf); assertEquals(l2, String.class); } public void testClass2() throws IOException, ClassNotFoundException { byte[] buf = ser.serialize(long[].class); Class l2 = (Class) ser.deserialize(buf); assertEquals(l2, long[].class); } public void testUnicodeString() throws ClassNotFoundException, IOException { String s = "Ciudad Bolíva"; byte[] buf = ser.serialize(s); assertTrue("text is not unicode", buf.length != s.length()); Object l2 = ser.deserialize(buf); assertEquals(l2, s); } public void testSerializationHeader() throws IOException { ByteArrayOutputStream b = new ByteArrayOutputStream(); new java.io.ObjectOutputStream(b).writeObject("lalala"); ByteArrayInputStream i = new ByteArrayInputStream(b.toByteArray()); final int header1 = i.read(); ByteArrayOutputStream b2 = new ByteArrayOutputStream(); new java.io.ObjectOutputStream(b2).writeObject(new Integer(1)); ByteArrayInputStream i2 = new ByteArrayInputStream(b2.toByteArray()); final int header2 = i2.read(); assertEquals(header1, header2); assertEquals(header1, SerializationHeader.JAVA_SERIALIZATION); } public void testPackedLongCollection() throws ClassNotFoundException, IOException { ArrayList l1 = new ArrayList(); l1.add(0L); l1.add(1L); l1.add(0L); assertEquals(l1, ser.deserialize(ser.serialize(l1))); l1.add(-1L); assertEquals(l1, ser.deserialize(ser.serialize(l1))); } public void testNegativeLongsArray() throws ClassNotFoundException, IOException { long[] l = new long[] { -12 }; Object deserialize = ser.deserialize(ser.serialize(l)); assertTrue(Arrays.equals(l, (long[]) deserialize)); } public void testNegativeIntArray() throws ClassNotFoundException, IOException { int[] l = new int[] { -12 }; Object deserialize = ser.deserialize(ser.serialize(l)); assertTrue(Arrays.equals(l, (int[]) deserialize)); } public void testNegativeShortArray() throws ClassNotFoundException, IOException { short[] l = new short[] { -12 }; Object deserialize = ser.deserialize(ser.serialize(l)); assertTrue(Arrays.equals(l, (short[]) deserialize)); } public void testBooleanArray() throws ClassNotFoundException, IOException { boolean[] l = new boolean[] { true,false }; Object deserialize = ser.deserialize(ser.serialize(l)); assertTrue(Arrays.equals(l, (boolean[]) deserialize)); } public void testDoubleArray() throws ClassNotFoundException, IOException { double[] l 
= new double[] { Math.PI, 1D }; Object deserialize = ser.deserialize(ser.serialize(l)); assertTrue(Arrays.equals(l, (double[]) deserialize)); } public void testFloatArray() throws ClassNotFoundException, IOException { float[] l = new float[] { 1F, 1.234235F }; Object deserialize = ser.deserialize(ser.serialize(l)); assertTrue(Arrays.equals(l, (float[]) deserialize)); } public void testByteArray() throws ClassNotFoundException, IOException { byte[] l = new byte[] { 1,34,-5 }; Object deserialize = ser.deserialize(ser.serialize(l)); assertTrue(Arrays.equals(l, (byte[]) deserialize)); } public void testCharArray() throws ClassNotFoundException, IOException { char[] l = new char[] { '1','a','&' }; Object deserialize = ser.deserialize(ser.serialize(l)); assertTrue(Arrays.equals(l, (char[]) deserialize)); } public void testDate() throws IOException, ClassNotFoundException { Date d = new Date(6546565565656L); assertEquals(d, ser.deserialize(ser.serialize(d))); d = new Date(System.currentTimeMillis()); assertEquals(d, ser.deserialize(ser.serialize(d))); } public void testBigDecimal() throws IOException, ClassNotFoundException { BigDecimal d = new BigDecimal("445656.7889889895165654423236"); assertEquals(d, ser.deserialize(ser.serialize(d))); d = new BigDecimal("-53534534534534445656.7889889895165654423236"); assertEquals(d, ser.deserialize(ser.serialize(d))); } public void testBigInteger() throws IOException, ClassNotFoundException { BigInteger d = new BigInteger("4456567889889895165654423236"); assertEquals(d, ser.deserialize(ser.serialize(d))); d = new BigInteger("-535345345345344456567889889895165654423236"); assertEquals(d, ser.deserialize(ser.serialize(d))); } public void testUUID() throws IOException, ClassNotFoundException { //try a bunch of UUIDs. for(int i = 0; i < 1000;i++) { UUID uuid = UUID.randomUUID(); SimpleEntry a = new SimpleEntry(uuid, "11"); byte[] buf = ser.serialize(a); SimpleEntry b = (SimpleEntry) ser.deserialize(buf); assertEquals(b, a); } } public void testLocale() throws Exception{ assertEquals(Locale.FRANCE, ser.deserialize(ser.serialize(Locale.FRANCE))); assertEquals(Locale.CANADA_FRENCH, ser.deserialize(ser.serialize(Locale.CANADA_FRENCH))); assertEquals(Locale.SIMPLIFIED_CHINESE, ser.deserialize(ser.serialize(Locale.SIMPLIFIED_CHINESE))); } enum Order { ASCENDING, DESCENDING } public void testEnum() throws Exception{ Order o = Order.ASCENDING; o = (Order) ser.deserialize(ser.serialize(o)); assertEquals(o,Order.ASCENDING ); assertEquals(o.ordinal(),Order.ASCENDING .ordinal()); assertEquals(o.name(),Order.ASCENDING .name()); o = Order.DESCENDING; o = (Order) ser.deserialize(ser.serialize(o)); assertEquals(o,Order.DESCENDING ); assertEquals(o.ordinal(),Order.DESCENDING .ordinal()); assertEquals(o.name(),Order.DESCENDING .name()); } static class Extr implements Externalizable{ int aaa = 11; String l = "agfa"; public void writeExternal(ObjectOutput out) throws IOException { out.writeObject(l); out.writeInt(aaa); } public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { l = (String) in.readObject(); aaa = in.readInt()+1; } } public void testExternalizable() throws Exception{ Extr e = new Extr(); e.aaa = 15; e.l = "pakla"; e = (Extr) ser.deserialize(ser.serialize(e)); assertEquals(e.aaa,16); //was incremented during serialization assertEquals(e.l,"pakla"); } public void testObjectArrayArray() throws IOException, ClassNotFoundException { Object[][] arr = new Object[][] { {(int)25, (short)20, (short)32, (short)16, (short)20}, }; Object[][] arr2 = 
(Object[][]) ser.deserialize(ser.serialize(arr)); for(int i=0;i h = r.createHashSet("hash"); for (Long l = 0L; l < 1e3; l++) { h.add(l); } r.commit(); r.copyToZip(dbpath); r.close(); System.out.println("Zip file created, size: " + tmp.length()); //open zip file and check it contains all data DB r2 = DBMaker.openZip(dbpath).readonly().make(); Set h2 = r2.getHashSet("hash"); for (Long l = 0L; l < 1e3; l++) { assertTrue(h2.contains(l)); } } } ================================================ FILE: src/test/java/org/apache/jdbm/StreamCorrupted.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.IOException; /** * Contributed test case for BTree by Christof Dallermassl (cdaller iicm.edu): *

 * -= quote from original message posted on jdbm-general =-
 *
 *
 * I tried to insert a couple of elements into a BTree and then remove
 * them one by one. After a number of removals, there is always (if more
 * than 20 elements in btree) a java.io.StreamCorruptedException thrown.
 *
 * The strange thing is, that on 50 elements, the exception is thrown
 * after removing 22, on 200 it is thrown after 36, on 1000 it is thrown
 * after 104, on 10000 it is thrown after 1003....
 *
 * The full stackTrace is here:
 * ---------------------- snip ------- snap -------------------------
 * java.io.StreamCorruptedException: Caught EOFException while reading the
 * stream header
 *   at java.io.ObjectInputStream.readStreamHeader(ObjectInputStream.java:845)
 *   at java.io.ObjectInputStream.<init>(ObjectInputStream.java:168)
 *   at jdbm.db.DB.byteArrayToObject(DB.java:296)
 *   at jdbm.db.DB.fetchObject(DB.java:239)
 *   at jdbm.helper.ObjectCache.fetchObject(ObjectCache.java:104)
 *   at jdbm.btree.BPage.loadBPage(BPage.java:670)
 *   at jdbm.btree.BPage.remove(BPage.java:492)
 *   at jdbm.btree.BPage.remove(BPage.java:437)
 *   at jdbm.btree.BTree.remove(BTree.java:313)
 *   at JDBMTest.main(JDBMTest.java:41)
 *
 * 
* * @author Christof Dallermassl */ public class StreamCorrupted extends TestCaseWithTestFile { /** * Basic tests */ public void testStreamCorrupted() throws IOException { DBAbstract db; BTree btree; int iterations; iterations = 100; // 23 works :-((((( // open database db = newDBCache(); // create a new B+Tree data structure btree = BTree.createInstance(db); db.setNamedObject("testbtree", btree.getRecid()); // action: // insert data for (int count = 0; count < iterations; count++) { btree.insert("num" + count, new Integer(count), true); } // delete data for (int count = 0; count < iterations; count++) { btree.remove("num" + count); } // close database db.close(); db = null; } } ================================================ FILE: src/test/java/org/apache/jdbm/TestCaseWithTestFile.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import junit.framework.TestCase; import java.io.File; import java.io.IOException; /** * Subclass from this class if you have any test cases that need to do file I/O. The * setUp() and tearDown() methods here will take care of cleanup on disk. * * @author cdegroot */ abstract class TestCaseWithTestFile extends TestCase { public static final String testFolder = System.getProperty("java.io.tmpdir", ".") + "/_testdb"; // public static final String testFileName = "test"; public void setUp() throws Exception { File f = new File(testFolder); if (!f.exists()) f.mkdirs(); } public void tearDown() throws Exception { File f = new File(testFolder); if (f.exists()) { for (File f2 : f.listFiles()) { f2.deleteOnExit(); f2.delete(); } } } static public String newTestFile() { return testFolder + File.separator + "test" + System.nanoTime(); } static public PageFile newRecordFile() throws IOException { return new PageFile(newTestFile()); } static public DBAbstract newDBCache() throws IOException { return (DBAbstract) DBMaker.openFile(newTestFile()).make(); } static public DBStore newDBNoCache() throws IOException { return (DBStore) DBMaker.openFile(newTestFile()).disableCache().make(); } } ================================================ FILE: src/test/java/org/apache/jdbm/TestInsertPerf.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.io.IOException; /** * Test BTree insert performance. */ public class TestInsertPerf extends TestCaseWithTestFile { int _numberOfObjects = 1000; public void testInsert() throws IOException { long start, finish; DBAbstract db = newDBCache(); BTree btree = BTree.createInstance(db); // Note: One can use specialized serializers for better performance / database size // btree = BTree.createInstance( db, new LongComparator(), // LongSerializer.INSTANCE, IntegerSerializer.INSTANCE ); start = System.currentTimeMillis(); for (int i = 0; i < _numberOfObjects; i++) { btree.insert(new Long(i), new Integer(i), false); } db.commit(); finish = System.currentTimeMillis(); System.out.println("It took " + (finish - start) + " ms to insert " + _numberOfObjects + " objects."); } } ================================================ FILE: src/test/java/org/apache/jdbm/TestInsertUpdate.java ================================================ package org.apache.jdbm; import java.io.*; import java.util.Map; import org.junit.Test; public class TestInsertUpdate extends TestCaseWithTestFile { /** * Test that the object is not modified by serialization. * * @throws IOException */ @Test public void testInsertUpdateWithCustomSerializer() throws IOException { DB db = newDBCache(); Serializer serializer = new HTreeBucketTest.LongSerializer(); Map map = db.createHashMap("custom", serializer, serializer); map.put(new Long(1), new Long(1)); map.put(new Long(2), new Long(2)); db.commit(); map.put(new Long(2), new Long(3)); db.commit(); db.close(); } } ================================================ FILE: src/test/java/org/apache/jdbm/TestIssues.java ================================================ package org.apache.jdbm; import java.io.IOException; import java.util.Map; public class TestIssues extends TestCaseWithTestFile { /* test this issue http://code.google.com/p/jdbm2/issues/detail?id=2 */ public void testHTreeClear() throws IOException { final DBAbstract db = newDBCache(); final HTree tree = (HTree) db.createHashMap("name"); for (int i = 0; i < 1001; i++) { tree.put(String.valueOf(i), String.valueOf(i)); } db.commit(); System.out.println("finished adding"); tree.clear(); db.commit(); System.out.println("finished clearing"); assertTrue(tree.isEmpty()); } public void testBTreeClear() throws IOException { final DB db = newDBCache(); final Map treeMap = db.createTreeMap("test"); for (int i = 0; i < 1001; i++) { treeMap.put(String.valueOf(i), String.valueOf(i)); } db.commit(); System.out.println("finished adding"); treeMap.clear(); db.commit(); System.out.println("finished clearing"); assertTrue(treeMap.isEmpty()); } public void test_issue_17_double_concurrent_get() throws InterruptedException { final DB db = DBMaker.openFile(newTestFile()).disableTransactions().disableCache().make(); db.createHashMap("map"); class RR implements Runnable{ public void run() { Map m =db.getHashMap("map"); for(int i = 1; i < 10000; i++) m.put(i, "-"+ i ); } } Thread thread = new Thread(new RR()); thread.start(); new RR().run(); thread.join(); db.close(); } public void test_issue_84_reopen_after_close(){ String f = newTestFile(); DB db = DBMaker.openFile(f).make(); db.close(); db = DBMaker.openFile(f).readonly().make(); db.close(); } } ================================================ FILE: 
src/test/java/org/apache/jdbm/TestLargeData.java ================================================ package org.apache.jdbm; import java.io.IOException; import java.util.Arrays; public class TestLargeData extends TestCaseWithTestFile { public void testLargeData() throws IOException { DBAbstract db = new DBStore(newTestFile(), false, false,false); byte[] data = UtilTT.makeRecord(1000000, (byte) 12); final long id = db.insert(data); data = (byte[]) db.fetch(id); UtilTT.checkRecord(data, 1000000, (byte) 12); db.commit(); data = UtilTT.makeRecord(2000000, (byte) 13); db.update(id, data); db.commit(); data = (byte[]) db.fetch(id); UtilTT.checkRecord(data, 2000000, (byte) 13); db.commit(); data = UtilTT.makeRecord(1500000, (byte) 14); db.update(id, data); data = (byte[]) db.fetch(id); UtilTT.checkRecord(data, 1500000, (byte) 14); db.commit(); data = UtilTT.makeRecord(2500000, (byte) 15); db.update(id, data); db.rollback(); data = (byte[]) db.fetch(id); UtilTT.checkRecord(data, 1500000, (byte) 14); db.commit(); data = UtilTT.makeRecord(1, (byte) 20); db.update(id, data); data = (byte[]) db.fetch(id); UtilTT.checkRecord(data, 1, (byte) 20); db.commit(); } public void testAllSizes() throws IOException { //use in memory store to make it faster DBStore db = (DBStore) DBMaker.openFile(newTestFile()).disableCache().disableTransactions().make(); for(int i = 1;i m) throws IOException { m.put(1, ""); long counter = r.countRecords(); //number of records should increase after inserting big record m.put(1, makeString(1000)); assertEquals(counter + 1, r.countRecords()); assertEquals(m.get(1), makeString(1000)); //old record should be disposed when replaced with big record m.put(1, makeString(1001)); assertEquals(counter + 1, r.countRecords()); assertEquals(m.get(1), makeString(1001)); //old record should be disposed when replaced with small record m.put(1, "aa"); assertEquals(counter, r.countRecords()); assertEquals(m.get(1), "aa"); //old record should be disposed after deleting m.put(1, makeString(1001)); assertEquals(counter + 1, r.countRecords()); assertEquals(m.get(1), makeString(1001)); m.remove(1); assertTrue(counter >= r.countRecords()); assertEquals(m.get(1), null); } public void testBTree() throws IOException { DBStore r = newDBNoCache(); Map m = r.createTreeMap("test"); doIt(r, m); } public void testHTree() throws IOException { DBStore r = newDBNoCache(); Map m = r.createHashMap("test"); doIt(r, m); } } ================================================ FILE: src/test/java/org/apache/jdbm/TestRollback.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
******************************************************************************/ package org.apache.jdbm; /** * Test cases for HTree rollback */ public class TestRollback extends TestCaseWithTestFile { /** * Test case courtesy of Derek Dick (mailto:ddick users.sourceforge.net) */ public void testRollback1() throws Exception { // Note: We start out with an empty file DBAbstract db = newDBCache(); HTree tree = (HTree) db.createHashMap("test"); tree.put("Foo", "Bar"); tree.put("Fo", "Fum"); db.commit(); tree.put("Hello", "World"); db.rollback(); assertTrue(tree.get("Foo").equals("Bar")); assertTrue(tree.get("Fo").equals("Fum")); assertTrue(tree.get("Hello") == null); } /** * Test case courtesy of Derek Dick (mailto:ddick users.sourceforge.net) */ public void testRollback2() throws Exception { DBAbstract db = newDBCache(); HTree tree = (HTree) db.createHashMap("test"); tree.put("hello", "world"); tree.put("goodnight", "gracie"); db.commit(); tree.put("derek", "dick"); db.rollback(); assertTrue(tree.get("derek") == null); assertTrue(tree.get("goodnight").equals("gracie")); assertTrue(tree.get("hello").equals("world")); } /** * Test case courtesy of Derek Dick (mailto:ddick users.sourceforge.net) */ public void testRollback1b() throws Exception { // Note: We start out with an empty file DBAbstract db = newDBCache(); HTree tree = (HTree) db.createHashMap("test"); tree.put("Foo", "Bar"); tree.put("Fo", "Fum"); db.commit(); tree.put("Hello", "World"); db.rollback(); assertTrue(tree.get("Foo").equals("Bar")); assertTrue(tree.get("Fo").equals("Fum")); assertTrue(tree.get("Hello") == null); } /** * Test case courtesy of Derek Dick (mailto:ddick users.sourceforge.net) */ public void testRollback2b() throws Exception { DBAbstract db; long root; // Note: We start out with an empty file db = newDBCache(); root = db.getNamedObject("xyz"); BTree tree = null; if (root == 0) { // create a new one tree = BTree.createInstance(db); root = tree.getRecid(); db.setNamedObject("xyz", root); db.commit(); } else { tree = BTree.load(db, root); } tree.insert("hello", "world", true); tree.insert("goodnight", "gracie", true); db.commit(); tree.insert("derek", "dick", true); db.rollback(); assertTrue(tree.get("derek") == null); assertTrue(tree.get("goodnight").equals("gracie")); assertTrue(tree.get("hello").equals("world")); } } ================================================ FILE: src/test/java/org/apache/jdbm/TestStress.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import java.util.Random; /** * This class contains stress tests for this package. 
*/ public class TestStress extends TestCaseWithTestFile { // test parameters final int RECORDS = 10000; final int MAXSIZE = 500; final int ROUNDS = 1 * 1000 * 1000; final int RPPROMILLE = ROUNDS / 1000; Random rnd = new Random(42); // holder for record data so we can compare class RecordData { long rowid; int size; byte b; RecordData(long rowid, int size, byte b) { this.rowid = rowid; this.size = size; this.b = b; } public String toString() { return "slot(" + rowid + ",sz=" + size + ",b=" + b + ")"; } } private int getRandomAllocatedSlot(RecordData[] d) { int slot = rnd.nextInt(RECORDS); while (d[slot] == null) { slot++; if (slot == RECORDS) slot = 0; // wrap } return slot; } // holder for root records long[] roots = new long[Magic.FILE_HEADER_NROOTS]; private int getRandomAllocatedRoot() { int slot = rnd.nextInt(Magic.FILE_HEADER_NROOTS); while (roots[slot] == 0) { slot++; if (slot == Magic.FILE_HEADER_NROOTS) slot = 0; // wrap } return slot; } /** * Test basics */ public void testBasics() throws Exception { String file = newTestFile(); DBStore db = new DBStore(file, false, false,false); // as this code is meant to test data structure calculcations // and stuff like that, we may want to disable transactions // that just slow us down. // mgr.disableTransactions(); RecordData[] d = new RecordData[RECORDS]; int recordCount = 0, rootCount = 0; int inserts = 0, updates = 0, deletes = 0, fetches = 0; int rootgets = 0, rootsets = 0; int slot = -1; try { for (int i = 0; i < ROUNDS; i++) { if ((i % RPPROMILLE) == 0) System.out.print("\rComplete: " + i / RPPROMILLE + "/1000th"); // close and re-open a couple of times during the // test, in order to check flushing etcetera. if ((i % (ROUNDS / 5)) == 0) { System.out.print(" (reopened at round " + i / RPPROMILLE + ")"); db.close(); db = new DBStore(file, false, false,false); // db.disableTransactions(); } // generate a random number and assign ranges to operations: // 0-10 = insert, 20 = delete, 30-50 = update, 51 = set root, // 52 = get root, rest = fetch. 
int op = rnd.nextInt(100); if (op <= 10) { // INSERT RECORD if (recordCount == RECORDS) { i -= 1; continue; } slot = 0; while (d[slot] != null) slot++; d[slot] = new RecordData(0, rnd.nextInt(MAXSIZE), (byte) rnd.nextInt()); d[slot].rowid = db.insert(UtilTT.makeRecord(d[slot].size, d[slot].b)); recordCount++; inserts++; } else if (op == 20) { // DELETE RECORD if (recordCount == 0) { i -= 1; continue; } slot = getRandomAllocatedSlot(d); db.delete(d[slot].rowid); d[slot] = null; recordCount--; deletes++; } else if (op <= 50) { // UPDATE RECORD if (recordCount == 0) { i -= 1; continue; } slot = getRandomAllocatedSlot(d); d[slot].size = rnd.nextInt(MAXSIZE); d[slot].b = (byte) rnd.nextInt(); db.update(d[slot].rowid, UtilTT.makeRecord(d[slot].size, d[slot].b)); updates++; } else if (op == 51) { // SET ROOT int root = rnd.nextInt(Magic.FILE_HEADER_NROOTS); if (root > 10) { //DONT do this for reserved roots roots[root] = rnd.nextLong(); db.setRoot((byte) root, roots[root]); rootsets++; } } else if (op == 52) { // GET ROOT if (rootCount == 0) { i -= 1; continue; } int root = getRandomAllocatedRoot(); if (root > 10) { //DONT do this for reserved roots assertEquals("root", roots[root], db.getRoot((byte) root)); rootgets++; } } else { // FETCH RECORD if (recordCount == 0) { i -= 1; continue; } slot = getRandomAllocatedSlot(d); byte[] data = (byte[]) db.fetch(d[slot].rowid); assertTrue("fetch round=" + i + ", slot=" + slot + ", " + d[slot], UtilTT.checkRecord(data, d[slot].size, d[slot].b)); fetches++; } } db.close(); } catch (Throwable e) { e.printStackTrace(); throw new RuntimeException("aborting test at slot " + slot + ": ", e); } finally { System.out.println("records : " + recordCount); System.out.println("deletes : " + deletes); System.out.println("inserts : " + inserts); System.out.println("updates : " + updates); System.out.println("fetches : " + fetches); System.out.println("rootget : " + rootgets); System.out.println("rootset : " + rootsets); int totalSize = 0; for (int i = 0; i < RECORDS; i++) if (d[i] != null) totalSize += d[i].size; System.out.println("total outstanding size: " + totalSize); //System.out.println("---"); //for (int i = 0; i < RECORDS; i++) // if (d[i] != null) // System.out.println("slot " + i + ": " + d[i]); } } } ================================================ FILE: src/test/java/org/apache/jdbm/UtilTT.java ================================================ /******************************************************************************* * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.apache.jdbm; import junit.framework.TestCase; /** * This class contains some test utilities. */ public class UtilTT { /** * Creates a "record" containing "length" repetitions of the indicated byte. 
*/ public static byte[] makeRecord(int length, byte b) { byte[] retval = new byte[length]; for (int i = 0; i < length; i++) retval[i] = b; return retval; } /** * Checks whether the record has the indicated length and data */ public static boolean checkRecord(byte[] data, int length, byte b) { TestCase.assertEquals("lenght does not match", length, data.length); for (int i = 0; i < length; i++) TestCase.assertEquals("byte " + i, b, data[i]); return true; } } ================================================ FILE: src/test/java/org/apache/jdbm/UtilsTest.java ================================================ package org.apache.jdbm; import junit.framework.TestCase; public class UtilsTest extends TestCase { public void testFormatSpaceUsage() { assertEquals("100B", Utils.formatSpaceUsage(100L)); assertEquals("1024B", Utils.formatSpaceUsage(1024L)); assertEquals("10KB", Utils.formatSpaceUsage(10024L)); assertEquals("15MB", Utils.formatSpaceUsage(15000000)); } } ================================================ FILE: src/test/java/org/apache/jdbm/junk/HugeData.java ================================================ package org.apache.jdbm.junk; import org.apache.jdbm.DB; import org.apache.jdbm.DBMaker; import java.io.IOException; import java.util.Map; /** * Creates huge file */ public class HugeData { static public void main(String[] args) throws IOException, InterruptedException { long startTime = System.currentTimeMillis(); //new File("/media/b0beb325-d9fe-4a30-9f58-77e6b15e6b7d/lost+found/large/").mkdirs(); DB db = DBMaker.openFile("/media/b0beb325-d9fe-4a30-9f58-77e6b15e6b7d/db") .disableTransactions() .make(); Map map = db.createTreeMap("test"); // List test = db.createLinkedList("test"); final double max = 1e10; for (Long i = 1L; i < max; i++) { if (i % 1e6 == 0) { System.out.println(i + " - " +(100D * i /max) + " %"); //Thread.sleep(1000000); } // test.add(i); map.put(i,i.hashCode()); } db.defrag(true); db.close(); System.out.println("Finished, total time: " + (System.currentTimeMillis() - startTime) / 1000); } } ================================================ FILE: src/test/java/org/apache/jdbm/junk/MappedBufferGrow.java ================================================ package org.apache.jdbm.junk; import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; import java.nio.MappedByteBuffer; import java.nio.channels.FileChannel; /** * This demonstrates MappedByteBuffer behaviour when file size is expanding */ public class MappedBufferGrow { public static void main(String[] args) throws IOException { ///File f = File.createTempFile("aaa","aaa"); File f = new File("test"); f.deleteOnExit(); RandomAccessFile raf = new RandomAccessFile(f, "rw"); raf.setLength((long) 1e6); System.out.println("length is " + raf.length()); raf.seek((long) 2e6); raf.write(1); System.out.println("length is " + raf.length()); MappedByteBuffer b = raf.getChannel().map(FileChannel.MapMode.READ_WRITE, 0, (long) 3e6); System.out.println("length after mapping is " + raf.length()); b.position((int) (3e6 - 10)); b.put((byte) 1); b.force(); System.out.println("length after writting to MappedByteBuffer is " + raf.length()); } } ================================================ FILE: src/test/java/org/apache/jdbm/junk/MappedBufferVersusRaf.java ================================================ package org.apache.jdbm.junk; import java.io.*; import java.nio.ByteBuffer; import java.nio.MappedByteBuffer; import java.nio.channels.FileChannel; import java.util.Random; /** * This script compares performance of memory 
mapped buffer versus RandomAccessFile */ public class MappedBufferVersusRaf { public static final int NUNBER_OF_READS = (int) 1e7; public static final int FILE_SIZE = (int) 1e6; public static final int BUFFER_SIZE = 2048; public static void main(String[] args) throws IOException { File f = File.createTempFile("mapped", "mapped"); f.deleteOnExit(); byte[] buffer = new byte[BUFFER_SIZE]; OutputStream o = new BufferedOutputStream(new FileOutputStream(f)); for (int i = 0; i < FILE_SIZE; i += BUFFER_SIZE) { o.write(buffer); } o.close(); System.out.println("File filled"); //open as RAF and read file randomly long t = System.currentTimeMillis(); RandomAccessFile raf = new RandomAccessFile(f, "r"); Random r = new Random(0); ByteBuffer byteBuf = ByteBuffer.wrap(buffer); for (int i = 0; i < NUNBER_OF_READS; i++) { long pos = r.nextInt(FILE_SIZE - BUFFER_SIZE); raf.seek(pos); raf.readFully(buffer); //read some random numbers just as JDBM does byteBuf.getLong(10); byteBuf.getLong(100); byteBuf.getLong(500); } System.out.println("RAF took " + (System.currentTimeMillis() - t)); //previous test was not so good, so try to map entire file into memory t = System.currentTimeMillis(); FileChannel channel = raf.getChannel(); r = new Random(0); MappedByteBuffer byteBuf3 = channel.map(FileChannel.MapMode.READ_ONLY, 0, raf.length()); byteBuf3.load(); for (int i = 0; i < NUNBER_OF_READS; i++) { int pos = r.nextInt(FILE_SIZE - BUFFER_SIZE); //read some random numbers just as JDBM does byteBuf3.getLong(pos + 10); byteBuf3.getLong(pos + 100); byteBuf3.getLong(pos + 500); } System.out.println("MappedByteBuffer took " + (System.currentTimeMillis() - t)); } } ================================================ FILE: src/test/java/org/apache/jdbm/junk/RandomInsertLongs.java ================================================ package org.apache.jdbm.junk; import org.apache.jdbm.*; import java.io.IOException; import java.util.Random; import java.util.Set; //TODO 112444910 public class RandomInsertLongs { public static void main(String[] args) throws IOException { DB db = DBMaker.openFile("/hugo/large/test"+System.currentTimeMillis()) .disableTransactions() .enableHardCache() .make(); Set m = db.createTreeSet("test"); Random r = new Random(234); long printEvery = (long) 1e7; long readEvery = (long) 1e5; long t = System.currentTimeMillis(); for(long i = 1;;i++){ m.add(makeLong(i)); //make a few random reads if(i%readEvery == 0 && i>200000000){ for(long j = 1;j0 && !m.contains(makeLong(key))) throw new InternalError(""+key); } } //print time for last round if(i%printEvery==0){ long time = System.currentTimeMillis(); System.out.println(i + " - "+(time-t)+" ms"); t = time; } } } public static Long makeLong(long value){ return ((long)(int)(value ^ (value >>> 32))) + ((long)(int)(((value+1) ^ ((value+1) >>> 32)))<<32); } }