(null, null);
browser = browse(key,true);
if (browser.getNext(tuple)) {
return tuple;
} else {
return null;
}
}
/**
 * Get a browser initially positioned at the beginning of the BTree.
 * <p>
 * WARNING: If you make structural modifications to the BTree during
 * browsing, you will get inconsistent browsing results.
 *
 * @return Browser positioned at the beginning of the BTree.
 */
@SuppressWarnings("unchecked")
public BTreeTupleBrowser browse()
        throws IOException {
    // Acquire the lock BEFORE the try block: if lock() were to fail inside
    // the try, the finally clause would unlock a lock we never held.
    lock.readLock().lock();
    try {
        BTreeNode rootNode = getRoot();
        if (rootNode == null) {
            // empty tree
            return EMPTY_BROWSER;
        }
        return rootNode.findFirst();
    } finally {
        lock.readLock().unlock();
    }
}
/**
 * Get a browser initially positioned just before the given key.
 * <p>
 * WARNING: If you make structural modifications to the BTree during
 * browsing, you will get inconsistent browsing results.
 *
 * @param key Key used to position the browser. If null, the browser
 *            will be positioned after the last entry of the BTree.
 *            (Null is considered to be an "infinite" key)
 * @param inclusive if true the browser may start at an entry equal to key,
 *            otherwise strictly after it
 * @return Browser positioned just before the given key.
 */
@SuppressWarnings("unchecked")
public BTreeTupleBrowser browse(final K key, final boolean inclusive)
        throws IOException {
    // Lock before try so the finally never unlocks a lock we failed to acquire.
    lock.readLock().lock();
    try {
        BTreeNode rootNode = getRoot();
        if (rootNode == null) {
            return EMPTY_BROWSER;
        }
        return rootNode.find(_height, key, inclusive);
    } finally {
        lock.readLock().unlock();
    }
}
/**
 * Return the persistent record identifier of the BTree.
 *
 * @return recid under which this tree is stored in the DB
 */
public long getRecid() {
return _recid;
}
/**
 * Return the root BTreeNode, or null if it doesn't exist.
 * A recid of 0 means the tree has no root yet.
 */
BTreeNode getRoot()
        throws IOException {
    if (_root == 0) {
        return null;
    }
    final BTreeNode node = _db.fetch(_root, _nodeSerializer);
    if (node == null) {
        return null;
    }
    // fetched nodes do not carry their own recid / owner; restore them
    node._recid = _root;
    node._btree = this;
    return node;
}
/**
 * Deserializes a BTree header from the given input.
 * <p>
 * Fields are read in exactly the order {@code writeExternal} writes them:
 * height, recid, root recid, entry count, hasValues flag, then the
 * comparator and key/value serializers via the context's deserializer.
 *
 * @param in source of the serialized form
 * @param ser serialization context; supplies the owning db
 * @return the reconstructed tree
 */
static BTree readExternal(DataInput in, Serialization ser)
throws IOException, ClassNotFoundException {
BTree tree = new BTree();
tree._db = ser.db;
tree._height = in.readInt();
tree._recid = in.readLong();
tree._root = in.readLong();
tree._entries = in.readLong();
tree.hasValues = in.readBoolean();
tree._comparator = (Comparator) ser.deserialize(in);
tree.keySerializer = (Serializer) ser.deserialize(in);
tree.valueSerializer = (Serializer) ser.deserialize(in);
return tree;
}
/**
 * Serializes this BTree header to the given output.
 * Field order must mirror {@code readExternal}.
 *
 * @param out destination for the serialized form
 */
public void writeExternal(DataOutput out)
throws IOException {
out.writeInt(_height);
out.writeLong(_recid);
out.writeLong(_root);
out.writeLong(_entries);
out.writeBoolean(hasValues);
_db.defaultSerializer().serialize(out, _comparator);
_db.defaultSerializer().serialize(out, keySerializer);
_db.defaultSerializer().serialize(out, valueSerializer);
}
/**
 * Copies a tree from one db to the other, defragmenting it along the way.
 *
 * @param recid recid of the serialized BTree header in the source store
 * @param r1 source store
 * @param r2 destination store
 * @throws IOException
 */
public static void defrag(long recid, DBStore r1, DBStore r2) throws IOException {
try {
// copy the raw tree header record first
byte[] data = r1.fetchRaw(recid);
r2.forceInsert(recid, data);
DataInput in = new DataInputOutput(data);
BTree t = (BTree) r1.defaultSerializer().deserialize(in);
// values are not needed while copying raw records
t.loadValues = false;
t._db = r1;
// NOTE(review): this constructor also inserts an empty node record into
// r1 (see BTreeNode(BTree, boolean)) — verify that side effect is intended
t._nodeSerializer = new BTreeNode(t, false);
BTreeNode p = t.getRoot();
if (p != null) {
// copy the raw root record, then recurse into children
r2.forceInsert(t._root, r1.fetchRaw(t._root));
p.defrag(r1, r2);
}
} catch (ClassNotFoundException e) {
throw new IOError(e);
}
}
/**
 * Browser returning no element. Shared singleton used when the tree
 * has no root.
 */
private static final BTreeTupleBrowser EMPTY_BROWSER = new BTreeTupleBrowser() {
public boolean getNext(BTreeTuple tuple) {
return false;
}
public boolean getPrevious(BTreeTuple tuple) {
return false;
}
public void remove(Object key) {
// nothing can ever be removed through an empty browser
throw new IndexOutOfBoundsException();
}
};
/**
 * add RecordListener which is notified about record changes
 *
 * @param listener listener to register
 */
public void addRecordListener(RecordListener listener) {
    // grow the array by one slot and place the new listener at the end
    final RecordListener[] extended =
            Arrays.copyOf(recordListeners, recordListeners.length + 1);
    extended[extended.length - 1] = listener;
    recordListeners = extended;
}
/**
 * remove RecordListener which is notified about record changes
 *
 * @param listener listener to remove; no-op if it was never registered
 */
public void removeRecordListener(RecordListener listener) {
    // BUG FIX: Arrays.asList returns a fixed-size view backed by the array;
    // calling remove() on it throws UnsupportedOperationException. Copy into
    // a resizable list first.
    List l = new java.util.ArrayList(Arrays.asList(recordListeners));
    l.remove(listener);
    // toArray(new RecordListener[0]) sizes the result exactly; the previous
    // new RecordListener[1] left a trailing null element when the list was empty
    recordListeners = (RecordListener[]) l.toArray(new RecordListener[0]);
}
/**
 * Return the record manager (db) that owns this tree.
 */
public DBAbstract getRecordManager() {
return _db;
}
/**
 * Return the comparator used to order keys, or null if keys are
 * compared by their natural ordering.
 */
public Comparator getComparator() {
return _comparator;
}
/**
 * Deletes all BTreeNodes in this BTree. The tree header record itself
 * is kept; the entry counter is reset to zero.
 */
public void clear()
        throws IOException {
    // Lock before try: a failed lock() must not reach the finally unlock.
    lock.writeLock().lock();
    try {
        BTreeNode rootNode = getRoot();
        if (rootNode != null)
            rootNode.delete();
        _entries = 0;
        // structural modification counter, used to detect concurrent changes
        modCount++;
    } finally {
        lock.writeLock().unlock();
    }
}
/**
 * Used for debugging and testing only. Populates the 'out' list with
 * the recids of all child nodes in the BTree.
 *
 * @param out list that receives the recid of the root and every child node
 * @throws IOException
 */
void dumpChildNodeRecIDs(List out) throws IOException {
BTreeNode root = getRoot();
if (root != null) {
out.add(root._recid);
root.dumpChildNodeRecIDs(out, _height);
}
}
/**
 * Return the hasValues flag of this tree (whether values are stored
 * alongside keys).
 */
public boolean hasValues() {
return hasValues;
}
/**
 * Browser to traverse a collection of tuples. The browser allows for
 * forward and reverse order traversal.
 */
static interface BTreeTupleBrowser {
/**
 * Get the next tuple.
 *
 * @param tuple Tuple into which values are copied.
 * @return True if values have been copied in tuple, or false if there is no next tuple.
 */
boolean getNext(BTree.BTreeTuple tuple) throws IOException;
/**
 * Get the previous tuple.
 *
 * @param tuple Tuple into which values are copied.
 * @return True if values have been copied in tuple, or false if there is no previous tuple.
 */
boolean getPrevious(BTree.BTreeTuple tuple) throws IOException;
/**
 * Remove an entry with the given key, and increase the browser's expectedModCount.
 * This method is here to support 'ConcurrentModificationException' on the Map interface.
 *
 * @param key key of the entry to remove
 */
void remove(K key) throws IOException;
}
/**
 * Tuple consisting of a key-value pair. Mutable holder that browsers
 * fill in place.
 */
static final class BTreeTuple {
// entry key
K key;
// entry value
V value;
BTreeTuple() {
// empty
}
BTreeTuple(K key, V value) {
this.key = key;
this.value = value;
}
}
}
================================================
FILE: src/main/java/org/apache/jdbm/BTreeLazyRecord.java
================================================
package org.apache.jdbm;
import java.io.*;
/**
* A record lazily loaded from the store.
* This is used in BTree/HTree to store big records outside of index tree
*
* @author Jan Kotek
*/
class BTreeLazyRecord {
// cached value; null until the first get(), cleared again by delete()
private E value = null;
private DBAbstract db;
private Serializer serializer;
// recid of the record holding the externally stored value
final long recid;
BTreeLazyRecord(DBAbstract db, long recid, Serializer serializer) {
this.db = db;
this.recid = recid;
this.serializer = serializer;
}
/**
 * Fetch the value, loading it from the store on first access and
 * caching it for subsequent calls.
 */
E get() {
if (value != null) return value;
try {
value = db.fetch(recid, serializer);
} catch (IOException e) {
throw new IOError(e);
}
return value;
}
/**
 * Delete the backing record and drop all internal references so this
 * object holds no memory afterwards.
 */
void delete() {
try {
db.delete(recid);
} catch (IOException e) {
throw new IOError(e);
}
value = null;
serializer = null;
db = null;
}
/**
 * Serializer used to insert already serialized data into the store.
 * Write-only: deserialization is unsupported.
 */
static final Serializer FAKE_SERIALIZER = new Serializer() {
public void serialize(DataOutput out, Object obj) throws IOException {
// obj is an already-serialized byte[]; write it through unchanged
byte[] data = (byte[]) obj;
out.write(data);
}
public Object deserialize(DataInput in) throws IOException, ClassNotFoundException {
throw new UnsupportedOperationException();
}
};
/**
 * Deserialize a record directly from the shared input buffer.
 * <p>
 * We should probably copy the data into a separate buffer and pass that
 * to the Serializer, but to make it faster the Serializer operates
 * directly on top of the buffer, and we verify afterwards that it read
 * the correct number of bytes.
 */
static Object fastDeser(DataInputOutput in, Serializer serializer, int expectedSize) throws IOException, ClassNotFoundException {
int origAvail = in.available();
if (origAvail == 0)
throw new InternalError(); //is backed up by byte[] buffer, so there should be always avail bytes
Object ret = serializer.deserialize(in);
//check that the serializer did not read more bytes; if it did, it read into the next record
int readed = origAvail - in.available();
if (readed > expectedSize)
throw new IOException("Serializer readed more bytes than is record size.");
else if (readed != expectedSize) {
//deserializer did not read all bytes; unusual but valid.
//Skip some to get into the correct position
for (int ii = 0; ii < expectedSize - readed; ii++)
in.readUnsignedByte();
}
return ret;
}
/**
 * if value in tree is serialized in more bytes, it is stored as separate record outside of tree
 * This value must be always smaller than 250
 */
static final int MAX_INTREE_RECORD_SIZE = 32;
static {
// guard: presumably sizes near 250+ would clash with the header
// markers below (NULL=255, LAZY_RECORD=254) — TODO confirm encoding
if (MAX_INTREE_RECORD_SIZE > 250) throw new Error();
}
// header marker for a null value
static final int NULL = 255;
// header marker for a value stored as a separate lazy record
static final int LAZY_RECORD = 254;
}
================================================
FILE: src/main/java/org/apache/jdbm/BTreeMap.java
================================================
/*******************************************************************************
* Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package org.apache.jdbm;
import java.io.IOError;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentNavigableMap;
/**
* Wrapper for BTree which implements ConcurrentNavigableMap interface
*
* @param <K> key type
* @param <V> value type
*
* @author Jan Kotek
*/
class BTreeMap extends AbstractMap implements ConcurrentNavigableMap {
    protected BTree tree;
    /** lower bound of this submap view; null means unbounded */
    protected final K fromKey;
    /** upper bound of this submap view; null means unbounded */
    protected final K toKey;
    protected final boolean readonly;
    /** lazily created key-set view, see navigableKeySet() */
    protected NavigableSet keySet2;
    private final boolean toInclusive;
    private final boolean fromInclusive;

    public BTreeMap(BTree tree, boolean readonly) {
        this(tree, readonly, null, false, null, false);
    }

    /**
     * Constructs a (possibly bounded) view over the given tree.
     */
    protected BTreeMap(BTree tree, boolean readonly, K fromKey, boolean fromInclusive, K toKey, boolean toInclusive) {
        this.tree = tree;
        this.fromKey = fromKey;
        this.fromInclusive = fromInclusive;
        this.toKey = toKey;
        this.toInclusive = toInclusive;
        this.readonly = readonly;
    }

    @Override
    public Set> entrySet() {
        return _entrySet;
    }

    private final Set> _entrySet = new AbstractSet>() {

        /** Wraps a key/value pair so that setValue writes through to the tree. */
        protected Entry newEntry(K k, V v) {
            return new SimpleEntry(k, v) {
                private static final long serialVersionUID = 978651696969194154L;

                public V setValue(V arg0) {
                    BTreeMap.this.put(getKey(), arg0);
                    return super.setValue(arg0);
                }
            };
        }

        public boolean add(java.util.Map.Entry e) {
            if (readonly)
                throw new UnsupportedOperationException("readonly");
            try {
                if (e.getKey() == null)
                    throw new NullPointerException("Can not add null key");
                if (!inBounds(e.getKey()))
                    throw new IllegalArgumentException("key outside of bounds");
                return tree.insert(e.getKey(), e.getValue(), true) == null;
            } catch (IOException e1) {
                throw new IOError(e1);
            }
        }

        @SuppressWarnings("unchecked")
        public boolean contains(Object o) {
            if (o instanceof Entry) {
                Entry e = (java.util.Map.Entry) o;
                try {
                    if (!inBounds(e.getKey()))
                        return false;
                    if (e.getKey() != null && tree.get(e.getKey()) != null)
                        return true;
                } catch (IOException e1) {
                    throw new IOError(e1);
                }
            }
            return false;
        }

        public Iterator> iterator() {
            try {
                final BTree.BTreeTupleBrowser br = fromKey == null ?
                        tree.browse() : tree.browse(fromKey, fromInclusive);
                return new Iterator>() {
                    // next entry to hand out, null when iteration is exhausted
                    private Entry next;
                    // key of the entry last returned by next(), for remove()
                    private K lastKey;

                    void ensureNext() {
                        try {
                            BTree.BTreeTuple t = new BTree.BTreeTuple();
                            if (br.getNext(t) && inBounds(t.key))
                                next = newEntry(t.key, t.value);
                            else
                                next = null;
                        } catch (IOException e1) {
                            throw new IOError(e1);
                        }
                    }

                    {
                        ensureNext();
                    }

                    public boolean hasNext() {
                        return next != null;
                    }

                    public java.util.Map.Entry next() {
                        if (next == null)
                            throw new NoSuchElementException();
                        Entry ret = next;
                        lastKey = ret.getKey();
                        //move to next position
                        ensureNext();
                        return ret;
                    }

                    public void remove() {
                        if (readonly)
                            throw new UnsupportedOperationException("readonly");
                        if (lastKey == null)
                            throw new IllegalStateException();
                        try {
                            br.remove(lastKey);
                            lastKey = null;
                        } catch (IOException e1) {
                            throw new IOError(e1);
                        }
                    }
                };
            } catch (IOException e) {
                throw new IOError(e);
            }
        }

        @SuppressWarnings("unchecked")
        public boolean remove(Object o) {
            if (readonly)
                throw new UnsupportedOperationException("readonly");
            if (o instanceof Entry) {
                Entry e = (java.util.Map.Entry) o;
                try {
                    //check for nulls
                    if (e.getKey() == null || e.getValue() == null)
                        return false;
                    if (!inBounds(e.getKey()))
                        throw new IllegalArgumentException("out of bounds");
                    //get old value, must be same as item in entry
                    V v = get(e.getKey());
                    if (v == null || !e.getValue().equals(v))
                        return false;
                    V v2 = tree.remove(e.getKey());
                    return v2 != null;
                } catch (IOException e1) {
                    throw new IOError(e1);
                }
            }
            return false;
        }

        public int size() {
            return BTreeMap.this.size();
        }

        public void clear() {
            // bounded views must remove entry by entry; the unbounded view
            // can clear the whole tree at once
            if (fromKey != null || toKey != null)
                super.clear();
            else
                try {
                    tree.clear();
                } catch (IOException e) {
                    throw new IOError(e);
                }
        }
    };

    /**
     * Returns true if the given key falls inside this view's bounds.
     */
    public boolean inBounds(K e) {
        if (fromKey == null && toKey == null)
            return true;
        Comparator comp = comparator();
        if (comp == null) comp = Utils.COMPARABLE_COMPARATOR;
        if (fromKey != null) {
            final int compare = comp.compare(e, fromKey);
            if (compare < 0) return false;
            if (!fromInclusive && compare == 0) return false;
        }
        if (toKey != null) {
            final int compare = comp.compare(e, toKey);
            if (compare > 0) return false;
            if (!toInclusive && compare == 0) return false;
        }
        return true;
    }

    @SuppressWarnings("unchecked")
    @Override
    public V get(Object key) {
        try {
            if (key == null)
                return null;
            if (!inBounds((K) key))
                return null;
            return tree.get((K) key);
        } catch (ClassCastException e) {
            return null;
        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public V remove(Object key) {
        if (readonly)
            throw new UnsupportedOperationException("readonly");
        try {
            if (key == null || tree.get((K) key) == null)
                return null;
            if (!inBounds((K) key))
                throw new IllegalArgumentException("out of bounds");
            return tree.remove((K) key);
        } catch (ClassCastException e) {
            return null;
        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public V put(K key, V value) {
        if (readonly)
            throw new UnsupportedOperationException("readonly");
        try {
            if (key == null || value == null)
                throw new NullPointerException("Null key or value");
            if (!inBounds(key))
                throw new IllegalArgumentException("out of bounds");
            return tree.insert(key, value, true);
        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public void clear() {
        entrySet().clear();
    }

    @SuppressWarnings("unchecked")
    @Override
    public boolean containsKey(Object key) {
        if (key == null)
            return false;
        try {
            if (!inBounds((K) key))
                return false;
            V v = tree.get((K) key);
            return v != null;
        } catch (IOException e) {
            throw new IOError(e);
        } catch (ClassCastException e) {
            return false;
        }
    }

    public Comparator super K> comparator() {
        return tree._comparator;
    }

    public K firstKey() {
        // NOTE(review): returns null on empty map; NavigableMap's contract
        // asks for NoSuchElementException — callers here (firstEntry) rely
        // on the null, so the behavior is kept
        if (isEmpty())
            return null;
        try {
            BTree.BTreeTupleBrowser b = fromKey == null ? tree.browse() : tree.browse(fromKey, fromInclusive);
            BTree.BTreeTuple t = new BTree.BTreeTuple();
            b.getNext(t);
            return t.key;
        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public K lastKey() {
        if (isEmpty())
            return null;
        try {
            BTree.BTreeTupleBrowser b = toKey == null ? tree.browse(null, true) : tree.browse(toKey, false);
            BTree.BTreeTuple t = new BTree.BTreeTuple();
            b.getPrevious(t);
            if (!toInclusive && toKey != null) {
                //make sure we won't return the excluded upper bound itself
                Comparator c = comparator();
                if (c == null) c = Utils.COMPARABLE_COMPARATOR;
                if (c.compare(t.key, toKey) == 0)
                    b.getPrevious(t);
            }
            return t.key;
        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public ConcurrentNavigableMap headMap(K toKey2, boolean inclusive) {
        // the tighter of the existing and the requested upper bound wins,
        // and keeps its own inclusiveness
        K toKey3 = Utils.min(this.toKey, toKey2, comparator());
        boolean inclusive2 = toKey3 == toKey ? toInclusive : inclusive;
        return new BTreeMap(tree, readonly, this.fromKey, this.fromInclusive, toKey3, inclusive2);
    }

    public ConcurrentNavigableMap headMap(K toKey) {
        return headMap(toKey, false);
    }

    public Entry lowerEntry(K key) {
        K k = lowerKey(key);
        return k == null ? null : new SimpleEntry(k, get(k));
    }

    public K lowerKey(K key) {
        if (isEmpty())
            return null;
        K key2 = Utils.min(key, toKey, comparator());
        try {
            BTree.BTreeTupleBrowser b = tree.browse(key2, true);
            BTree.BTreeTuple t = new BTree.BTreeTuple();
            b.getPrevious(t);
            return t.key;
        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public Entry floorEntry(K key) {
        K k = floorKey(key);
        return k == null ? null : new SimpleEntry(k, get(k));
    }

    public K floorKey(K key) {
        if (isEmpty())
            return null;
        K key2 = Utils.max(key, fromKey, comparator());
        try {
            BTree.BTreeTupleBrowser b = tree.browse(key2, true);
            BTree.BTreeTuple t = new BTree.BTreeTuple();
            b.getNext(t);
            Comparator comp = comparator();
            if (comp == null) comp = Utils.COMPARABLE_COMPARATOR;
            if (comp.compare(t.key, key2) == 0)
                return t.key;
            // step back over the too-large key and the probe position
            b.getPrevious(t);
            b.getPrevious(t);
            return t.key;
        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public Entry ceilingEntry(K key) {
        K k = ceilingKey(key);
        return k == null ? null : new SimpleEntry(k, get(k));
    }

    public K ceilingKey(K key) {
        if (isEmpty())
            return null;
        K key2 = Utils.min(key, toKey, comparator());
        try {
            BTree.BTreeTupleBrowser b = tree.browse(key2, true);
            BTree.BTreeTuple t = new BTree.BTreeTuple();
            b.getNext(t);
            return t.key;
        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public Entry higherEntry(K key) {
        K k = higherKey(key);
        return k == null ? null : new SimpleEntry(k, get(k));
    }

    public K higherKey(K key) {
        if (isEmpty())
            return null;
        K key2 = Utils.max(key, fromKey, comparator());
        try {
            BTree.BTreeTupleBrowser b = tree.browse(key2, false);
            BTree.BTreeTuple t = new BTree.BTreeTuple();
            b.getNext(t);
            return t.key;
        } catch (IOException e) {
            throw new IOError(e);
        }
    }

    public Entry firstEntry() {
        K k = firstKey();
        return k == null ? null : new SimpleEntry(k, get(k));
    }

    public Entry lastEntry() {
        K k = lastKey();
        return k == null ? null : new SimpleEntry(k, get(k));
    }

    public Entry pollFirstEntry() {
        Entry first = firstEntry();
        if (first != null)
            remove(first.getKey());
        return first;
    }

    public Entry pollLastEntry() {
        Entry last = lastEntry();
        if (last != null)
            remove(last.getKey());
        return last;
    }

    public ConcurrentNavigableMap descendingMap() {
        throw new UnsupportedOperationException("not implemented yet");
        //TODO implement descending (reverse order) map
    }

    public NavigableSet keySet() {
        return navigableKeySet();
    }

    public NavigableSet navigableKeySet() {
        if (keySet2 == null)
            keySet2 = new BTreeSet((BTreeMap) this);
        return keySet2;
    }

    public NavigableSet descendingKeySet() {
        return descendingMap().navigableKeySet();
    }

    public ConcurrentNavigableMap tailMap(K fromKey) {
        return tailMap(fromKey, true);
    }

    public ConcurrentNavigableMap tailMap(K fromKey2, boolean inclusive) {
        K fromKey3 = Utils.max(this.fromKey, fromKey2, comparator());
        // BUG FIX: the original compared fromKey3 to toKey and used
        // toInclusive (copy-paste from headMap). When the existing lower
        // bound wins, its own inclusiveness must be preserved.
        boolean inclusive2 = fromKey3 == this.fromKey ? this.fromInclusive : inclusive;
        return new BTreeMap(tree, readonly, fromKey3, inclusive2, toKey, toInclusive);
    }

    public ConcurrentNavigableMap subMap(K fromKey, boolean fromInclusive, K toKey, boolean toInclusive) {
        Comparator comp = comparator();
        if (comp == null) comp = Utils.COMPARABLE_COMPARATOR;
        if (comp.compare(fromKey, toKey) > 0)
            throw new IllegalArgumentException("fromKey is bigger then toKey");
        return new BTreeMap(tree, readonly, fromKey, fromInclusive, toKey, toInclusive);
    }

    public ConcurrentNavigableMap subMap(K fromKey, K toKey) {
        return subMap(fromKey, true, toKey, false);
    }

    public BTree getTree() {
        return tree;
    }

    public void addRecordListener(RecordListener listener) {
        tree.addRecordListener(listener);
    }

    public DBAbstract getRecordManager() {
        return tree.getRecordManager();
    }

    public void removeRecordListener(RecordListener listener) {
        tree.removeRecordListener(listener);
    }

    public int size() {
        if (fromKey == null && toKey == null)
            return (int) tree._entries; //use fast counter on tree if Map has no bounds
        else {
            //have to count items via the iterator
            Iterator iter = keySet().iterator();
            int counter = 0;
            while (iter.hasNext()) {
                iter.next();
                counter++;
            }
            return counter;
        }
    }

    public V putIfAbsent(K key, V value) {
        // lock BEFORE try: a failed lock() must not reach the finally unlock
        tree.lock.writeLock().lock();
        try {
            if (!containsKey(key))
                return put(key, value);
            else
                return get(key);
        } finally {
            tree.lock.writeLock().unlock();
        }
    }

    public boolean remove(Object key, Object value) {
        tree.lock.writeLock().lock();
        try {
            if (containsKey(key) && get(key).equals(value)) {
                remove(key);
                return true;
            } else return false;
        } finally {
            tree.lock.writeLock().unlock();
        }
    }

    public boolean replace(K key, V oldValue, V newValue) {
        tree.lock.writeLock().lock();
        try {
            if (containsKey(key) && get(key).equals(oldValue)) {
                put(key, newValue);
                return true;
            } else return false;
        } finally {
            tree.lock.writeLock().unlock();
        }
    }

    public V replace(K key, V value) {
        tree.lock.writeLock().lock();
        try {
            if (containsKey(key)) {
                return put(key, value);
            } else return null;
        } finally {
            tree.lock.writeLock().unlock();
        }
    }
}
================================================
FILE: src/main/java/org/apache/jdbm/BTreeNode.java
================================================
/*******************************************************************************
* Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package org.apache.jdbm;
import java.io.*;
import java.util.ConcurrentModificationException;
import java.util.List;
/**
* Node of a BTree.
*
* The node contains a number of key-value pairs. Keys are ordered to allow
* dichotomic search. If value is too big, it is stored in separate record
* and only recid reference is stored
*
* If the node is a leaf node, the keys and values are user-defined and
* represent entries inserted by the user.
*
* If the node is non-leaf, each key represents the greatest key in the
* underlying BTreeNode and the values are recids pointing to the children BTreeNodes.
* The only exception is the rightmost BTreeNode, which is considered to have an
* "infinite" key value, meaning that any insert will be to the left of this
* pseudo-key
*
* @author Alex Boisvert
* @author Jan Kotek
*/
final class BTreeNode
implements Serializer> {
private static final boolean DEBUG = false;
/**
 * Parent B+Tree.
 */
transient BTree _btree;
/**
 * This BTreeNode's record ID in the DB.
 */
protected transient long _recid;
/**
 * Flag indicating if this is a leaf BTreeNode.
 */
protected boolean _isLeaf;
/**
 * Keys of children nodes
 */
protected K[] _keys;
/**
 * Values associated with keys. (Only valid if leaf node)
 */
protected Object[] _values;
/**
 * Children nodes (recids) associated with keys. (Only valid if non-leaf node)
 */
protected long[] _children;
/**
 * Index of first used item at the node. Entries occupy the high end of
 * the arrays (constructors start at DEFAULT_SIZE - 2 and isFull() checks
 * _first == 0), so the node fills toward index 0.
 */
protected byte _first;
/**
 * Previous leaf node (only if this node is a leaf)
 */
protected long _previous;
/**
 * Next leaf node (only if this node is a leaf)
 */
protected long _next;
/**
 * Return the B+Tree that is the owner of this {@link BTreeNode}.
 */
public BTree getBTree() {
return _btree;
}
/**
 * No-argument constructor used by serialization.
 */
public BTreeNode() {
// empty
}
/**
 * Root node overflow constructor. Builds a new non-leaf root holding two
 * children: the overflow node and the previous root. The node persists
 * itself into the db and records the resulting recid.
 */
@SuppressWarnings("unchecked")
BTreeNode(BTree btree, BTreeNode root, BTreeNode overflow)
throws IOException {
_btree = btree;
_isLeaf = false;
_first = BTree.DEFAULT_SIZE - 2;
_keys = (K[]) new Object[BTree.DEFAULT_SIZE];
_keys[BTree.DEFAULT_SIZE - 2] = overflow.getLargestKey();
_keys[BTree.DEFAULT_SIZE - 1] = root.getLargestKey();
_children = new long[BTree.DEFAULT_SIZE];
_children[BTree.DEFAULT_SIZE - 2] = overflow._recid;
_children[BTree.DEFAULT_SIZE - 1] = root._recid;
_recid = _btree._db.insert(this, this,false);
}
/**
 * Root node (first insert) constructor. Builds a leaf root containing the
 * single given entry, persists it and records the resulting recid.
 */
@SuppressWarnings("unchecked")
BTreeNode(BTree btree, K key, V value)
throws IOException {
_btree = btree;
_isLeaf = true;
_first = BTree.DEFAULT_SIZE - 2;
_keys = (K[]) new Object[BTree.DEFAULT_SIZE];
_keys[BTree.DEFAULT_SIZE - 2] = key;
_keys[BTree.DEFAULT_SIZE - 1] = null; // I am the root BTreeNode for now
_values = new Object[BTree.DEFAULT_SIZE];
_values[BTree.DEFAULT_SIZE - 2] = value;
_values[BTree.DEFAULT_SIZE - 1] = null; // I am the root BTreeNode for now
_recid = _btree._db.insert(this, this,false);
}
/**
 * Overflow node constructor. Creates an empty BTreeNode and persists it
 * immediately; an IOException from the insert is rethrown wrapped in
 * IOError because the constructor does not declare the checked exception.
 */
@SuppressWarnings("unchecked")
BTreeNode(BTree btree, boolean isLeaf){
_btree = btree;
_isLeaf = isLeaf;
// node will initially be half-full
_first = BTree.DEFAULT_SIZE / 2;
_keys = (K[]) new Object[BTree.DEFAULT_SIZE];
if (isLeaf) {
_values = new Object[BTree.DEFAULT_SIZE];
} else {
_children = new long[BTree.DEFAULT_SIZE];
}
try{
_recid = _btree._db.insert(this, this,false);
}catch(IOException e ){
throw new IOError(e);
}
}
/**
 * Get largest key under this BTreeNode (stored at the last slot).
 * Null is considered to be the greatest possible key.
 */
K getLargestKey() {
return _keys[BTree.DEFAULT_SIZE - 1];
}
/**
 * Return true if BTreeNode is empty — i.e. the first used slot is the
 * last slot of the entry array.
 */
boolean isEmpty() {
    final int capacity = _isLeaf ? _values.length : _children.length;
    return _first == capacity - 1;
}
/**
 * Return true if BTreeNode is full — entries fill toward index 0, so a
 * first-used index of 0 means no free slot remains.
 */
boolean isFull() {
    return _first == 0;
}
/**
 * Find the object associated with the given key.
 *
 * @param height Height of the current BTreeNode (zero is leaf node)
 * @param key The key
 * @param inclusive true to allow positioning at an entry equal to key
 * @return TupleBrowser positioned just before the given key, or before
 * next greater key if key isn't found.
 */
BTree.BTreeTupleBrowser find(int height, final K key, final boolean inclusive)
throws IOException {
byte index = findChildren(key,inclusive);
height -= 1;
if (height == 0) {
// leaf node
return new Browser(this, index);
} else {
// non-leaf node: descend into the child covering the key
BTreeNode child = loadNode(_children[index]);
return child.find(height, key,inclusive);
}
}
/**
 * Find value associated with the given key, or null if there is no
 * exact match.
 *
 * @param height Height of the current BTreeNode (zero is leaf node)
 * @param key The key
 * @return the value for an exact key match, with lazy records resolved;
 * null when the key is absent
 */
V findValue(int height, K key)
throws IOException {
byte index = findChildren(key,true);
height -= 1;
if (height == 0) {
K key2 = _keys[index];
// findChildren returns the matching key or the next ordered key, so we must
// check if we have an exact match
if (key2 == null || compare(key, key2) != 0)
return null;
// leaf node; resolve lazily stored values before returning
if (_values[index] instanceof BTreeLazyRecord)
return ((BTreeLazyRecord) _values[index]).get();
else
return (V) _values[index];
} else {
// non-leaf node: descend into the child covering the key
BTreeNode child = loadNode(_children[index]);
return child.findValue(height, key);
}
}
/**
 * Find the first entry and return a browser positioned before it.
 * Walks down the leftmost (first-used) child chain until a leaf is reached.
 *
 * @return TupleBrowser positioned just before the first entry.
 */
BTree.BTreeTupleBrowser findFirst()
        throws IOException {
    if (!_isLeaf) {
        // descend along the first used child
        return loadNode(_children[_first]).findFirst();
    }
    return new Browser(this, _first);
}
/**
 * Deletes this BTreeNode and all children nodes from the record manager.
 * For a leaf node, the neighbouring leaves are relinked first so the
 * leaf chain stays consistent.
 */
void delete()
        throws IOException {
    if (_isLeaf) {
        if (_next != 0) {
            BTreeNode nextNode = loadNode(_next);
            if (nextNode._previous == _recid) { // this consistency check can be removed in production code
                nextNode._previous = _previous;
                _btree._db.update(nextNode._recid, nextNode, nextNode);
            } else {
                throw new Error("Inconsistent data in BTree");
            }
        }
        if (_previous != 0) {
            BTreeNode previousNode = loadNode(_previous);
            // BUG FIX: condition was inverted ("!=") — the consistent case
            // (previousNode._next == _recid) threw the Error while the
            // inconsistent case silently relinked. Mirrors the _next branch above.
            if (previousNode._next == _recid) { // this consistency check can be removed in production code
                previousNode._next = _next;
                _btree._db.update(previousNode._recid, previousNode, previousNode);
            } else {
                throw new Error("Inconsistent data in BTree");
            }
        }
    } else {
        // recursively delete all children before removing this node
        int left = _first;
        int right = BTree.DEFAULT_SIZE - 1;
        for (int i = left; i <= right; i++) {
            BTreeNode childNode = loadNode(_children[i]);
            childNode.delete();
        }
    }
    _btree._db.delete(_recid);
}
/**
 * Insert the given key and value.
 *
 * Since the Btree does not support duplicate entries, the caller must
 * specify whether to replace the existing value.
 *
 * @param height Height of the current BTreeNode (zero is leaf node)
 * @param key Insert key
 * @param value Insert value
 * @param replace Set to true to replace the existing value, if one exists.
 * @return Insertion result containing existing value OR a BTreeNode if the key
 * was inserted and provoked a BTreeNode overflow.
 */
InsertResult insert(int height, K key, final V value, final boolean replace)
throws IOException {
InsertResult result;
long overflow;
final byte index = findChildren(key,true);
height -= 1;
if (height == 0) {
//reuse InsertResult instance to avoid GC trashing on massive inserts
result = _btree.insertResultReuse;
_btree.insertResultReuse = null;
if (result == null)
result = new InsertResult();
// inserting on a leaf BTreeNode
overflow = -1;
if (DEBUG) {
System.out.println("BTreeNode.insert() Insert on leaf node key=" + key
+ " value=" + value + " index=" + index);
}
if (compare(_keys[index], key) == 0) {
// key already exists
if (DEBUG) {
System.out.println("BTreeNode.insert() Key already exists.");
}
boolean isLazyRecord = _values[index] instanceof BTreeLazyRecord;
if (isLazyRecord)
result._existing = ((BTreeLazyRecord) _values[index]).get();
else
result._existing = (V) _values[index];
if (replace) {
//remove old lazy record if necesarry
if (isLazyRecord)
((BTreeLazyRecord) _values[index]).delete();
_values[index] = value;
_btree._db.update(_recid, this, this);
}
// return the existing key
return result;
}
} else {
// non-leaf BTreeNode: delegate the insert to the covering child
BTreeNode child = loadNode(_children[index]);
result = child.insert(height, key, value, replace);
if (result._existing != null) {
// return existing key, if any.
return result;
}
if (result._overflow == null) {
// no overflow means we're done with insertion
return result;
}
// there was an overflow, we need to insert the overflow node on this BTreeNode
if (DEBUG) {
System.out.println("BTreeNode.insert() Overflow node: " + result._overflow._recid);
}
// from here on, insert the overflow node's separator key and recid
// into this node, reusing the generic entry-insertion path below
key = result._overflow.getLargestKey();
overflow = result._overflow._recid;
// update child's largest key
_keys[index] = child.getLargestKey();
// clean result so we can reuse it
result._overflow = null;
}
// if we get here, we need to insert a new entry on the BTreeNode before _children[ index ]
if (!isFull()) {
if (height == 0) {
insertEntry(this, index - 1, key, value);
} else {
insertChild(this, index - 1, key, overflow);
}
_btree._db.update(_recid, this, this);
return result;
}
// node is full, we must divide the node
final byte half = BTree.DEFAULT_SIZE >> 1;
BTreeNode newNode = new BTreeNode(_btree, _isLeaf);
if (index < half) {
// move lower-half of entries to overflow node, including new entry
if (DEBUG) {
System.out.println("BTreeNode.insert() move lower-half of entries to overflow BTreeNode, including new entry.");
}
if (height == 0) {
copyEntries(this, 0, newNode, half, index);
setEntry(newNode, half + index, key, value);
copyEntries(this, index, newNode, half + index + 1, half - index - 1);
} else {
copyChildren(this, 0, newNode, half, index);
setChild(newNode, half + index, key, overflow);
copyChildren(this, index, newNode, half + index + 1, half - index - 1);
}
} else {
// move lower-half of entries to overflow node, new entry stays on this node
if (DEBUG) {
System.out.println("BTreeNode.insert() move lower-half of entries to overflow BTreeNode. New entry stays");
}
if (height == 0) {
copyEntries(this, 0, newNode, half, half);
copyEntries(this, half, this, half - 1, index - half);
setEntry(this, index - 1, key, value);
} else {
copyChildren(this, 0, newNode, half, half);
copyChildren(this, half, this, half - 1, index - half);
setChild(this, index - 1, key, overflow);
}
}
_first = half - 1;
// nullify lower half of entries
for (int i = 0; i < _first; i++) {
if (height == 0) {
setEntry(this, i, null, null);
} else {
setChild(this, i, null, -1);
}
}
if (_isLeaf) {
// link newly created node into the leaf chain before this node
newNode._previous = _previous;
newNode._next = _recid;
if (_previous != 0) {
BTreeNode previous = loadNode(_previous);
previous._next = newNode._recid;
_btree._db.update(_previous, previous, this);
}
_previous = newNode._recid;
}
_btree._db.update(_recid, this, this);
_btree._db.update(newNode._recid, newNode, this);
// hand the overflow node up to the parent for insertion there
result._overflow = newNode;
return result;
}
/**
 * Remove the entry associated with the given key.
 *
 * @param height Height of the current BTreeNode (zero is leaf node)
 * @param key Removal key
 * @return Remove result object carrying the removed value and a flag
 *         telling the caller whether this node underflowed
 * @throws IllegalArgumentException if the key is not found in a leaf
 * @throws IllegalStateException if a rebalancing invariant is violated
 */
RemoveResult remove(int height, K key)
        throws IOException {
    RemoveResult result;

    // a node is considered underflowed when more than half of its slots are unused
    int half = BTree.DEFAULT_SIZE / 2;
    byte index = findChildren(key, true);

    height -= 1;
    if (height == 0) {
        // remove leaf entry
        if (compare(_keys[index], key) != 0) {
            throw new IllegalArgumentException("Key not found: " + key);
        }
        result = new RemoveResult();
        // resolve the stored value; a lazy record lives in its own DB
        // record which must be deleted as well
        if (_values[index] instanceof BTreeLazyRecord) {
            BTreeLazyRecord r = (BTreeLazyRecord) _values[index];
            result._value = r.get();
            r.delete();
        } else {
            result._value = (V) _values[index];
        }
        removeEntry(this, index);

        // update this node
        _btree._db.update(_recid, this, this);

    } else {
        // recurse into Btree to remove entry on a children node
        BTreeNode child = loadNode(_children[index]);
        result = child.remove(height, key);

        // update children
        _keys[index] = child.getLargestKey();
        _btree._db.update(_recid, this, this);

        if (result._underflow) {
            // underflow occured: rebalance by stealing entries from a
            // sibling ("brother") node or by merging two nodes into one
            if (child._first != half + 1) {
                throw new IllegalStateException("Error during underflow [1]");
            }
            if (index < _children.length - 1) {
                // exists greater brother node
                BTreeNode brother = loadNode(_children[index + 1]);
                int bfirst = brother._first;
                if (bfirst < half) {
                    // steal entries from "brother" node
                    int steal = (half - bfirst + 1) / 2;
                    brother._first += steal;
                    child._first -= steal;
                    if (child._isLeaf) {
                        copyEntries(child, half + 1, child, half + 1 - steal, half - 1);
                        copyEntries(brother, bfirst, child, 2 * half - steal, steal);
                    } else {
                        copyChildren(child, half + 1, child, half + 1 - steal, half - 1);
                        copyChildren(brother, bfirst, child, 2 * half - steal, steal);
                    }
                    // clear the slots vacated in "brother"
                    for (int i = bfirst; i < bfirst + steal; i++) {
                        if (brother._isLeaf) {
                            setEntry(brother, i, null, null);
                        } else {
                            setChild(brother, i, null, -1);
                        }
                    }

                    // update child's largest key
                    _keys[index] = child.getLargestKey();

                    // no change in previous/next node

                    // update nodes
                    _btree._db.update(_recid, this, this);
                    _btree._db.update(brother._recid, brother, this);
                    _btree._db.update(child._recid, child, this);

                } else {
                    // move all entries from node "child" to "brother"
                    if (brother._first != half) {
                        throw new IllegalStateException("Error during underflow [2]");
                    }
                    brother._first = 1;
                    if (child._isLeaf) {
                        copyEntries(child, half + 1, brother, 1, half - 1);
                    } else {
                        copyChildren(child, half + 1, brother, 1, half - 1);
                    }
                    _btree._db.update(brother._recid, brother, this);

                    // remove "child" from current node
                    if (_isLeaf) {
                        copyEntries(this, _first, this, _first + 1, index - _first);
                        setEntry(this, _first, null, null);
                    } else {
                        copyChildren(this, _first, this, _first + 1, index - _first);
                        setChild(this, _first, null, -1);
                    }
                    _first += 1;
                    _btree._db.update(_recid, this, this);

                    // re-link previous and next nodes
                    if (child._previous != 0) {
                        BTreeNode prev = loadNode(child._previous);
                        prev._next = child._next;
                        _btree._db.update(prev._recid, prev, this);
                    }
                    if (child._next != 0) {
                        BTreeNode next = loadNode(child._next);
                        next._previous = child._previous;
                        _btree._db.update(next._recid, next, this);
                    }

                    // delete "child" node
                    _btree._db.delete(child._recid);
                }
            } else {
                // node "brother" is before "child"
                BTreeNode brother = loadNode(_children[index - 1]);
                int bfirst = brother._first;
                if (bfirst < half) {
                    // steal entries from "brother" node
                    int steal = (half - bfirst + 1) / 2;
                    brother._first += steal;
                    child._first -= steal;
                    if (child._isLeaf) {
                        copyEntries(brother, 2 * half - steal, child,
                                half + 1 - steal, steal);
                        copyEntries(brother, bfirst, brother,
                                bfirst + steal, 2 * half - bfirst - steal);
                    } else {
                        copyChildren(brother, 2 * half - steal, child,
                                half + 1 - steal, steal);
                        copyChildren(brother, bfirst, brother,
                                bfirst + steal, 2 * half - bfirst - steal);
                    }
                    // clear the slots vacated in "brother"
                    for (int i = bfirst; i < bfirst + steal; i++) {
                        if (brother._isLeaf) {
                            setEntry(brother, i, null, null);
                        } else {
                            setChild(brother, i, null, -1);
                        }
                    }

                    // update brother's largest key
                    _keys[index - 1] = brother.getLargestKey();

                    // no change in previous/next node

                    // update nodes
                    _btree._db.update(_recid, this, this);
                    _btree._db.update(brother._recid, brother, this);
                    _btree._db.update(child._recid, child, this);

                } else {
                    // move all entries from node "brother" to "child"
                    if (brother._first != half) {
                        throw new IllegalStateException("Error during underflow [3]");
                    }
                    child._first = 1;
                    if (child._isLeaf) {
                        copyEntries(brother, half, child, 1, half);
                    } else {
                        copyChildren(brother, half, child, 1, half);
                    }
                    _btree._db.update(child._recid, child, this);

                    // remove "brother" from current node
                    if (_isLeaf) {
                        copyEntries(this, _first, this, _first + 1, index - 1 - _first);
                        setEntry(this, _first, null, null);
                    } else {
                        copyChildren(this, _first, this, _first + 1, index - 1 - _first);
                        setChild(this, _first, null, -1);
                    }
                    _first += 1;
                    _btree._db.update(_recid, this, this);

                    // re-link previous and next nodes
                    if (brother._previous != 0) {
                        BTreeNode prev = loadNode(brother._previous);
                        prev._next = brother._next;
                        _btree._db.update(prev._recid, prev, this);
                    }
                    if (brother._next != 0) {
                        BTreeNode next = loadNode(brother._next);
                        next._previous = brother._previous;
                        _btree._db.update(next._recid, next, this);
                    }

                    // delete "brother" node
                    _btree._db.delete(brother._recid);
                }
            }
        }
    }

    // underflow if node is more than half-empty
    result._underflow = _first > half;

    return result;
}
/**
 * Find the first child slot whose key is equal to or greater than the
 * given key (strictly greater when {@code inclusive} is false).
 *
 * @param key search key (null compares greater than any key)
 * @param inclusive when true an equal key matches; when false only a
 *                  strictly greater key does
 * @return index of first slot with an equal-or-greater key
 */
private byte findChildren(final K key, final boolean inclusive) {
    // keys comparing below this threshold are "too small" for the search
    final int bound = inclusive ? 0 : 1;
    int lo = _first;
    int hi = BTree.DEFAULT_SIZE - 1;
    int mid;
    // binary search over the used portion of the key array
    do {
        mid = (lo + hi) / 2;
        if (compare(_keys[mid], key) < bound) {
            lo = mid + 1;
        } else {
            hi = mid;
        }
    } while (lo < hi);
    return (byte) hi;
}
/**
 * Insert a key/value pair at the given slot, shifting all entries
 * before it one position to the left.
 */
private static void insertEntry(BTreeNode node, int index,
                                K key, V value) {
    final int from = node._first;
    final int length = index - from + 1;
    // make room by sliding the leading entries one slot down
    System.arraycopy(node._keys, from, node._keys, from - 1, length);
    System.arraycopy(node._values, from, node._values, from - 1, length);
    node._first -= 1;
    node._keys[index] = key;
    node._values[index] = value;
}
/**
 * Insert a key and child recid at the given slot, shifting all entries
 * before it one position to the left.
 */
private static void insertChild(BTreeNode node, int index,
                                K key, long child) {
    final int from = node._first;
    final int length = index - from + 1;
    // make room by sliding the leading entries one slot down
    System.arraycopy(node._keys, from, node._keys, from - 1, length);
    System.arraycopy(node._children, from, node._children, from - 1, length);
    node._first -= 1;
    node._keys[index] = key;
    node._children[index] = child;
}
/**
 * Remove the entry at the given slot, sliding the leading entries one
 * position to the right over it and clearing the freed first slot.
 */
private static void removeEntry(BTreeNode node, int index) {
    final int from = node._first;
    final int length = index - from;
    System.arraycopy(node._keys, from, node._keys, from + 1, length);
    System.arraycopy(node._values, from, node._values, from + 1, length);
    // the first used slot is now free
    node._keys[from] = null;
    node._values[from] = null;
    node._first++;
}
/**
 * Overwrite the key/value pair stored at the given slot.
 */
private static void setEntry(BTreeNode node, int index, K key, V value) {
    node._values[index] = value;
    node._keys[index] = key;
}
/**
 * Overwrite the key and child BTreeNode recid stored at the given slot.
 */
private static void setChild(BTreeNode node, int index, K key, long recid) {
    node._children[index] = recid;
    node._keys[index] = key;
}
/**
 * Copy {@code count} key/value pairs from one node to another
 * (also works for overlapping ranges within the same node).
 */
private static void copyEntries(BTreeNode source, int indexSource,
                                BTreeNode dest, int indexDest, int count) {
    System.arraycopy(source._values, indexSource, dest._values, indexDest, count);
    System.arraycopy(source._keys, indexSource, dest._keys, indexDest, count);
}
/**
 * Copy {@code count} key/child-recid pairs from one node to another
 * (also works for overlapping ranges within the same node).
 */
private static void copyChildren(BTreeNode source, int indexSource,
                                 BTreeNode dest, int indexDest, int count) {
    System.arraycopy(source._children, indexSource, dest._children, indexDest, count);
    System.arraycopy(source._keys, indexSource, dest._keys, indexDest, count);
}
/**
 * Fetch the node stored at the given recid and wire its transient
 * fields (recid and owning tree) back up.
 */
private BTreeNode loadNode(long recid)
        throws IOException {
    final BTreeNode node = _btree._db.fetch(recid, this);
    node._btree = _btree;
    node._recid = recid;
    return node;
}
/**
 * Compare two keys. A null key sorts after everything else: unused
 * slots hold null, which acts as an "infinite" key.
 */
private final int compare(final K value1, final K value2) {
    if (value1 == null) return 1;
    if (value2 == null) return -1;
    // fall back to natural ordering when no comparator was configured
    return _btree._comparator == null
            ? ((Comparable) value1).compareTo(value2)
            : _btree._comparator.compare(value1, value2);
}
/**
 * Dump the structure of the tree on the screen. This is used for debugging
 * purposes only.
 *
 * @param height indentation depth for this node
 */
private void dump(int height) {
    // build the indentation prefix once; repeated String '+=' in a loop
    // reallocates the whole string each pass (accidentally quadratic)
    StringBuilder indent = new StringBuilder();
    for (int i = 0; i < height; i++) {
        indent.append("    ");
    }
    String prefix = indent.toString();
    System.out.println(prefix + "-------------------------------------- BTreeNode recid=" + _recid);
    System.out.println(prefix + "first=" + _first);
    for (int i = 0; i < BTree.DEFAULT_SIZE; i++) {
        if (_isLeaf) {
            System.out.println(prefix + "BTreeNode [" + i + "] " + _keys[i] + " " + _values[i]);
        } else {
            System.out.println(prefix + "BTreeNode [" + i + "] " + _keys[i] + " " + _children[i]);
        }
    }
    System.out.println(prefix + "--------------------------------------");
}
/**
 * Recursively dump the state of the BTree on screen. This is used for
 * debugging purposes only.
 */
void dumpRecursive(int height, int level)
        throws IOException {
    final int childHeight = height - 1;
    final int childLevel = level + 1;
    if (childHeight <= 0) {
        return; // leaf level reached, nothing below to dump
    }
    for (byte i = _first; i < BTree.DEFAULT_SIZE; i++) {
        if (_keys[i] == null) {
            break; // no more used slots
        }
        BTreeNode child = loadNode(_children[i]);
        child.dump(childLevel);
        child.dumpRecursive(childHeight, childLevel);
    }
}
/**
 * Deserialize the content of an object from a byte array.
 * Field order must match {@link #serialize} exactly.
 *
 * @param ois2 input holding a node previously written by serialize
 * @return the reconstructed node (its _recid/_btree fields are wired by the caller)
 * @throws IOException on read errors, or wrapping a ClassNotFoundException
 */
@SuppressWarnings("unchecked")
public BTreeNode deserialize(DataInput ois2)
        throws IOException {
    DataInputOutput ois = (DataInputOutput) ois2;
    BTreeNode node = new BTreeNode();
    // header byte distinguishes leaf from non-leaf nodes
    switch (ois.readUnsignedByte()) {
        case SerializationHeader.BTREE_NODE_LEAF:
            node._isLeaf = true;
            break;
        case SerializationHeader.BTREE_NODE_NONLEAF:
            node._isLeaf = false;
            break;
        default:
            throw new InternalError("wrong BTreeNode header");
    }
    // only leaves participate in the previous/next sibling linked list
    if (node._isLeaf) {
        node._previous = LongPacker.unpackLong(ois);
        node._next = LongPacker.unpackLong(ois);
    }
    node._first = ois.readByte();
    if (!node._isLeaf) {
        node._children = new long[BTree.DEFAULT_SIZE];
        for (int i = node._first; i < BTree.DEFAULT_SIZE; i++) {
            node._children[i] = LongPacker.unpackLong(ois);
        }
    }
    if (!_btree.loadValues)
        return node;
    try {
        node._keys = readKeys(ois, node._first);
    } catch (ClassNotFoundException except) {
        // chain the cause instead of discarding it (was: getMessage() only),
        // consistent with the values branch below
        throw new IOException(except);
    }
    if (node._isLeaf) {
        try {
            readValues(ois, node);
        } catch (ClassNotFoundException except) {
            throw new IOException(except);
        }
    }
    return node;
}
/**
 * Serialize the content of an object into a byte array.
 *
 * The field order written here must exactly match the order read back
 * in {@link #deserialize}.
 *
 * @param oos output to write to
 * @param obj Object to serialize
 */
public void serialize(DataOutput oos, BTreeNode obj)
        throws IOException {
    // note: It is assumed that BTreeNode instance doing the serialization is the parent
    // of the BTreeNode object being serialized.
    BTreeNode node = obj;
    // header byte marks leaf vs non-leaf
    oos.writeByte(node._isLeaf ? SerializationHeader.BTREE_NODE_LEAF : SerializationHeader.BTREE_NODE_NONLEAF);
    // only leaves carry the previous/next sibling links
    if (node._isLeaf) {
        LongPacker.packLong(oos, node._previous);
        LongPacker.packLong(oos, node._next);
    }
    oos.write(node._first);
    // non-leaf nodes store the recids of their used children
    if (!node._isLeaf) {
        for (int i = node._first; i < BTree.DEFAULT_SIZE; i++) {
            LongPacker.packLong(oos, node._children[i]);
        }
    }
    writeKeys(oos, node._keys, node._first);
    // values are written only for leaves, and only when the tree stores values
    if (node._isLeaf && _btree.hasValues()) {
        writeValues(oos, node);
    }
}
/**
 * Populate a leaf node's value array from the stream. Each value slot is
 * preceded by a header byte: NULL, LAZY_RECORD (value stored in its own
 * record), or an inline length.
 */
private void readValues(DataInputOutput ois, BTreeNode node) throws IOException, ClassNotFoundException {
    node._values = new Object[BTree.DEFAULT_SIZE];
    if (!_btree.hasValues()) {
        // key-only tree: put a shared placeholder in every used slot
        for (int i = node._first; i < BTree.DEFAULT_SIZE; i++) {
            if (node._keys[i] != null) {
                node._values[i] = Utils.EMPTY_STRING;
            }
        }
        return;
    }
    Serializer serializer = _btree.valueSerializer != null
            ? _btree.valueSerializer
            : (Serializer) _btree.getRecordManager().defaultSerializer();
    for (int i = node._first; i < BTree.DEFAULT_SIZE; i++) {
        final int header = ois.readUnsignedByte();
        if (header == BTreeLazyRecord.LAZY_RECORD) {
            // value lives in its own record; resolved lazily on first access
            node._values[i] = new BTreeLazyRecord(_btree._db, LongPacker.unpackLong(ois), serializer);
        } else if (header == BTreeLazyRecord.NULL) {
            node._values[i] = null;
        } else {
            // small value inlined directly in the node
            node._values[i] = BTreeLazyRecord.fastDeser(ois, serializer, header);
        }
    }
}
/**
 * Write a leaf node's values to the stream. Small values are inlined
 * (header byte is the length); large or already-external values are
 * stored as separate records referenced by recid.
 */
private void writeValues(DataOutput oos, BTreeNode node) throws IOException {
    final Serializer serializer = _btree.valueSerializer != null
            ? _btree.valueSerializer
            : _btree.getRecordManager().defaultSerializer();
    DataInputOutput buffer = null;
    for (int i = node._first; i < BTree.DEFAULT_SIZE; i++) {
        final Object v = node._values[i];
        if (v == null) {
            oos.write(BTreeLazyRecord.NULL);
        } else if (v instanceof BTreeLazyRecord) {
            // already stored out-of-line, just reference its recid
            oos.write(BTreeLazyRecord.LAZY_RECORD);
            LongPacker.packLong(oos, ((BTreeLazyRecord) v).recid);
        } else {
            // serialize into a scratch buffer first to learn the size
            if (buffer == null) {
                buffer = new DataInputOutput();
            } else {
                buffer.reset();
            }
            serializer.serialize(buffer, v);
            if (buffer.getPos() > BTreeLazyRecord.MAX_INTREE_RECORD_SIZE) {
                // too big to inline: spill into a separate record
                long recid = _btree._db.insert(buffer.toByteArray(), BTreeLazyRecord.FAKE_SERIALIZER, true);
                oos.write(BTreeLazyRecord.LAZY_RECORD);
                LongPacker.packLong(oos, recid);
            } else {
                // small enough: inline, header byte carries the length
                oos.write(buffer.getPos());
                oos.write(buffer.getBuf(), 0, buffer.getPos());
            }
        }
    }
}
// Header byte values for the serialized key array (see readKeys/writeKeys).
// Each marks the common type of every key in the node, which selects the
// type-specific compression scheme applied to the whole array.
private static final int ALL_NULL = 0;
private static final int ALL_INTEGERS = 1 << 5;
private static final int ALL_INTEGERS_NEGATIVE = 2 << 5;
private static final int ALL_LONGS = 3 << 5;
private static final int ALL_LONGS_NEGATIVE = 4 << 5;
private static final int ALL_STRINGS = 5 << 5;
private static final int ALL_OTHER = 6 << 5;
/**
 * Read the key array previously written by {@link #writeKeys}.
 * A header byte selects the compression scheme (ALL_* constants).
 *
 * @param ois input to read from
 * @param firstUse index of the first used slot; earlier slots remain null
 * @return key array of size BTree.DEFAULT_SIZE
 */
private K[] readKeys(DataInput ois, final int firstUse) throws IOException, ClassNotFoundException {
    Object[] ret = new Object[BTree.DEFAULT_SIZE];
    final int type = ois.readUnsignedByte();
    if (type == ALL_NULL) {
        return (K[]) ret;
    } else if (type == ALL_INTEGERS || type == ALL_INTEGERS_NEGATIVE) {
        // keys are stored as packed deltas from the preceding key
        long first = LongPacker.unpackLong(ois);
        if (type == ALL_INTEGERS_NEGATIVE)
            first = -first;
        ret[firstUse] = Integer.valueOf((int) first);
        for (int i = firstUse + 1; i < BTree.DEFAULT_SIZE; i++) {
            // ret[i] = Serialization.readObject(ois);
            long v = LongPacker.unpackLong(ois);
            if (v == 0) continue; //null
            v = v + first;
            ret[i] = Integer.valueOf((int) v);
            first = v;
        }
        return (K[]) ret;
    } else if (type == ALL_LONGS || type == ALL_LONGS_NEGATIVE) {
        long first = LongPacker.unpackLong(ois);
        if (type == ALL_LONGS_NEGATIVE)
            first = -first;
        ret[firstUse] = Long.valueOf(first);
        for (int i = firstUse + 1; i < BTree.DEFAULT_SIZE; i++) {
            //ret[i] = Serialization.readObject(ois);
            long v = LongPacker.unpackLong(ois);
            if (v == 0) continue; //null
            v = v + first;
            ret[i] = Long.valueOf(v);
            first = v;
        }
        return (K[]) ret;
    } else if (type == ALL_STRINGS) {
        // strings were written with shared-prefix (leading value) packing
        byte[] previous = null;
        for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) {
            byte[] b = leadingValuePackRead(ois, previous, 0);
            if (b == null) continue;
            ret[i] = new String(b, Serialization.UTF8);
            previous = b;
        }
        return (K[]) ret;
    } else if (type == ALL_OTHER) {
        //TODO why this block is here?
        // default serializer path: keys written back-to-back, no packing
        if (_btree.keySerializer == null || _btree.keySerializer == _btree.getRecordManager().defaultSerializer()) {
            for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) {
                ret[i] = _btree.getRecordManager().defaultSerializer().deserialize(ois);
            }
            return (K[]) ret;
        }
        // custom serializer path: each key was leading-value packed
        Serializer ser = _btree.keySerializer != null ? _btree.keySerializer : _btree.getRecordManager().defaultSerializer();
        DataInputOutput in2 = null;
        byte[] previous = null;
        for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) {
            byte[] b = leadingValuePackRead(ois, previous, 0);
            if (b == null) continue;
            if (in2 == null) {
                in2 = new DataInputOutput();
            }
            in2.reset(b);
            ret[i] = ser.deserialize(in2);
            previous = b;
        }
        return (K[]) ret;
    } else {
        throw new InternalError("unknown BTreeNode header type: " + type);
    }
}
/**
 * Write the node's key array using type-specific compression.
 *
 * A single header byte (one of the ALL_* constants) records which
 * scheme was used; {@link #readKeys} dispatches on it when loading.
 *
 * @param oos output to write to
 * @param keys key array, must be of size BTree.DEFAULT_SIZE
 * @param firstUse index of the first used slot
 * @throws IllegalArgumentException if the key array has the wrong size
 */
@SuppressWarnings("unchecked")
private void writeKeys(DataOutput oos, K[] keys, final int firstUse) throws IOException {
    if (keys.length != BTree.DEFAULT_SIZE)
        throw new IllegalArgumentException("wrong keys size");

    //check if all items on key are null
    boolean allNull = true;
    for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) {
        if (keys[i] != null) {
            allNull = false;
            break;
        }
    }
    if (allNull) {
        oos.write(ALL_NULL);
        return;
    }

    /**
     * Special compression to compress Long and Integer
     * (only valid with natural ordering and the default serializer)
     */
    if ((_btree._comparator == Utils.COMPARABLE_COMPARATOR || _btree._comparator == null) &&
            (_btree.keySerializer == null || _btree.keySerializer == _btree.getRecordManager().defaultSerializer())) {
        boolean allInteger = true;
        for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) {
            if (keys[i] != null && keys[i].getClass() != Integer.class) {
                allInteger = false;
                break;
            }
        }
        boolean allLong = true;
        for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) {
            if (keys[i] != null && (keys[i].getClass() != Long.class ||
                    //special case to exclude Long.MIN_VALUE from conversion, causes problems to LongPacker
                    ((Long) keys[i]).longValue() == Long.MIN_VALUE)
            ) {
                allLong = false;
                break;
            }
        }

        if (allLong) {
            //check that diff between MIN and MAX fits into PACKED_LONG
            long max = Long.MIN_VALUE;
            long min = Long.MAX_VALUE;
            for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) {
                if (keys[i] == null) continue;
                long v = (Long) keys[i];
                if (v > max) max = v;
                if (v < min) min = v;
            }
            //now convert to Double to prevent overflow errors
            double max2 = max;
            double min2 = min;
            double maxDiff = Long.MAX_VALUE;
            if (max2 - min2 > maxDiff / 2) // divide by two just to by sure
                allLong = false;
        }

        if (allLong && allInteger)
            throw new InternalError();

        if (allLong || allInteger) {
            // keys are written as packed deltas from the preceding key
            long first = ((Number) keys[firstUse]).longValue();
            //write header
            if (allInteger) {
                if (first > 0) oos.write(ALL_INTEGERS);
                else oos.write(ALL_INTEGERS_NEGATIVE);
            } else if (allLong) {
                if (first > 0) oos.write(ALL_LONGS);
                else oos.write(ALL_LONGS_NEGATIVE);
            } else {
                throw new InternalError();
            }
            //write first
            LongPacker.packLong(oos, Math.abs(first));
            //write others
            for (int i = firstUse + 1; i < BTree.DEFAULT_SIZE; i++) {
                // Serialization.writeObject(oos, keys[i]);
                if (keys[i] == null)
                    LongPacker.packLong(oos, 0);
                else {
                    long v = ((Number) keys[i]).longValue();
                    if (v <= first) throw new InternalError("not ordered");
                    LongPacker.packLong(oos, v - first);
                    first = v;
                }
            }
            return;
        } else {
            //another special case for Strings
            boolean allString = true;
            for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) {
                if (keys[i] != null && (keys[i].getClass() != String.class)
                ) {
                    allString = false;
                    break;
                }
            }
            if (allString) {
                // strings use shared-prefix (leading value) compression
                oos.write(ALL_STRINGS);
                byte[] previous = null;
                for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) {
                    if (keys[i] == null) {
                        leadingValuePackWrite(oos, null, previous, 0);
                    } else {
                        byte[] b = ((String) keys[i]).getBytes(Serialization.UTF8);
                        leadingValuePackWrite(oos, b, previous, 0);
                        previous = b;
                    }
                }
                return;
            }
        }
    }

    /**
     * other case, serializer is provided or other stuff
     */
    oos.write(ALL_OTHER);
    if (_btree.keySerializer == null || _btree.keySerializer == _btree.getRecordManager().defaultSerializer()) {
        for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) {
            _btree.getRecordManager().defaultSerializer().serialize(oos, keys[i]);
        }
        return;
    }

    //custom serializer is provided, use it
    Serializer ser = _btree.keySerializer;
    byte[] previous = null;
    DataInputOutput out3 = new DataInputOutput();
    for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) {
        if (keys[i] == null) {
            leadingValuePackWrite(oos, null, previous, 0);
        } else {
            out3.reset();
            ser.serialize(out3, keys[i]);
            byte[] b = out3.toByteArray();
            leadingValuePackWrite(oos, b, previous, 0);
            previous = b;
        }
    }
}
/**
 * Copy this node's subtree from store r1 to store r2, preserving
 * every record id (used during defragmentation).
 */
public void defrag(DBStore r1, DBStore r2) throws IOException {
    if (_children == null) {
        return; // no child array, nothing below to copy
    }
    for (long childRecid : _children) {
        if (childRecid == 0) {
            continue; // unused slot
        }
        byte[] raw = r1.fetchRaw(childRecid);
        r2.forceInsert(childRecid, raw);
        BTreeNode childNode = deserialize(new DataInputOutput(raw));
        childNode._btree = _btree;
        childNode.defrag(r1, r2);
    }
}
/**
 * STATIC INNER CLASS
 * Result from insert() method call
 */
static final class InsertResult {
    /**
     * Overflow node created by a split, or null when no split occurred.
     */
    BTreeNode _overflow;
    /**
     * Existing value for the insertion key, or null if the key was new.
     */
    V _existing;
}
/**
 * STATIC INNER CLASS
 * Result from remove() method call
 */
static final class RemoveResult {
    /**
     * Set to true if the underlying node underflowed (more than half
     * empty), telling the parent it must rebalance.
     */
    boolean _underflow;
    /**
     * Removed entry value
     */
    V _value;
}
/**
 * PRIVATE INNER CLASS
 * Browser to traverse leaf nodes.
 */
static final class Browser
        implements BTree.BTreeTupleBrowser {
    /**
     * Current node, or null when the last record returned by the
     * iterator was deleted (browser then sits past the end).
     */
    private BTreeNode _node;
    /**
     * Current index in the node. The index is positioned on the next
     * tuple to return.
     */
    private byte _index;
    /** modCount of the backing BTree at creation, for fail-fast checks. */
    private int expectedModCount;

    /**
     * Create a browser.
     *
     * @param node Current node
     * @param index Position of the next tuple to return.
     */
    Browser(BTreeNode node, byte index) {
        _node = node;
        _index = index;
        expectedModCount = node._btree.modCount;
    }

    public boolean getNext(BTree.BTreeTuple tuple)
            throws IOException {
        // check _node for null BEFORE dereferencing it in the modCount
        // check: remove() sets _node = null after deleting the last record
        // (previously this order caused a NullPointerException)
        if (_node == null) {
            //last record in iterator was deleted, so iterator is at end of node
            return false;
        }
        if (expectedModCount != _node._btree.modCount)
            throw new ConcurrentModificationException();
        if (_index < BTree.DEFAULT_SIZE) {
            if (_node._keys[_index] == null) {
                // reached end of the tree.
                return false;
            }
        } else if (_node._next != 0) {
            // move to next node
            _node = _node.loadNode(_node._next);
            _index = _node._first;
        } else {
            // past the last slot of the rightmost leaf: end of tree
            // (previously fell through and indexed _keys[DEFAULT_SIZE],
            // raising ArrayIndexOutOfBoundsException)
            return false;
        }
        tuple.key = _node._keys[_index];
        if (_node._values[_index] instanceof BTreeLazyRecord)
            tuple.value = ((BTreeLazyRecord) _node._values[_index]).get();
        else
            tuple.value = (V) _node._values[_index];
        _index++;
        return true;
    }

    public boolean getPrevious(BTree.BTreeTuple tuple)
            throws IOException {
        // null check first for the same reason as in getNext()
        if (_node == null) {
            //deleted last record, but this situation is only supported on getNext
            throw new InternalError();
        }
        if (expectedModCount != _node._btree.modCount)
            throw new ConcurrentModificationException();
        if (_index == _node._first) {
            if (_node._previous != 0) {
                _node = _node.loadNode(_node._previous);
                _index = BTree.DEFAULT_SIZE;
            } else {
                // reached beginning of the tree
                return false;
            }
        }
        _index--;
        tuple.key = _node._keys[_index];
        if (_node._values[_index] instanceof BTreeLazyRecord)
            tuple.value = ((BTreeLazyRecord) _node._values[_index]).get();
        else
            tuple.value = (V) _node._values[_index];
        return true;
    }

    public void remove(K key) throws IOException {
        if (expectedModCount != _node._btree.modCount)
            throw new ConcurrentModificationException();
        _node._btree.remove(key);
        expectedModCount++;
        //An entry was removed and this may trigger tree rebalance,
        //This would change current node layout, so find our position again
        BTree.BTreeTupleBrowser b = _node._btree.browse(key, true);
        //browser is positioned just before the deleted value; check whether a successor exists
        if (b.getNext(new BTree.BTreeTuple(null, null))) {
            //next value exists, copy its state
            Browser b2 = (Browser) b;
            this._node = b2._node;
            this._index = b2._index;
        } else {
            // no successor: mark the browser as exhausted (getNext returns false)
            this._node = null;
            this._index = -1;
        }
    }
}
/**
 * Used for debugging and testing only. Recursively obtains the recids of
 * all child BTreeNodes and adds them to the 'out' list.
 *
 * @param out list that receives the child recids
 * @param height remaining tree height; recursion stops at leaf level
 * @throws IOException on storage errors while loading children
 */
void dumpChildNodeRecIDs(List out, int height)
        throws IOException {
    height -= 1;
    if (height > 0) {
        for (byte i = _first; i < BTree.DEFAULT_SIZE; i++) {
            if (_children[i] == 0) continue;
            BTreeNode child = loadNode(_children[i]);
            // Long.valueOf caches small values; the Long(long) constructor is deprecated
            out.add(Long.valueOf(child._recid));
            child.dumpChildNodeRecIDs(out, height);
        }
    }
}
/**
 * Read previously written data
 *
 * @author Kevin Day
 *
 * @param in input to read from
 * @param previous the byte[] returned by the previous call (prefix source), or null
 * @param ignoreLeadingCount number of leading bytes that are always stored verbatim
 * @return the unpacked byte array, or null if a null marker was stored
 */
static byte[] leadingValuePackRead(DataInput in, byte[] previous, int ignoreLeadingCount) throws IOException {
    int len = LongPacker.unpackInt(in) - 1; // 0 indicates null
    if (len == -1)
        return null;
    int actualCommon = LongPacker.unpackInt(in);
    byte[] buf = new byte[len];
    // without a previous value there is nothing to share a prefix with
    if (previous == null) {
        actualCommon = 0;
    }
    if (actualCommon > 0) {
        // the ignored prefix was written verbatim; the rest of the common
        // run is reconstructed from the previous value
        in.readFully(buf, 0, ignoreLeadingCount);
        System.arraycopy(previous, ignoreLeadingCount, buf, ignoreLeadingCount, actualCommon - ignoreLeadingCount);
    }
    in.readFully(buf, actualCommon, len - actualCommon);
    return buf;
}
/**
 * This method is used for delta compression for keys.
 * Writes the contents of buf to the DataOutput out, with special encoding if
 * there are common leading bytes with the previous array stored by this compressor.
 *
 * @author Kevin Day
 */
static void leadingValuePackWrite(DataOutput out, byte[] buf, byte[] previous, int ignoreLeadingCount) throws IOException {
    // null is encoded as a zero length header (lengths are stored as len+1)
    if (buf == null) {
        LongPacker.packInt(out, 0);
        return;
    }
    // count how many leading bytes buf shares with the previous value,
    // starting after the always-verbatim prefix
    int common = ignoreLeadingCount;
    if (previous != null) {
        int limit = Math.min(buf.length, previous.length);
        if (limit > Short.MAX_VALUE) {
            limit = Short.MAX_VALUE;
        }
        while (common < limit && buf[common] == previous[common]) {
            common++;
        }
    }
    LongPacker.packInt(out, buf.length + 1); // store as +1, 0 indicates null
    LongPacker.packInt(out, common);
    // emit the ignored prefix verbatim, then everything after the shared run
    out.write(buf, 0, ignoreLeadingCount);
    out.write(buf, common, buf.length - common);
}
/**
 * Load the rightmost (largest-key) child of this non-leaf node.
 */
BTreeNode loadLastChildNode() throws IOException {
    final long lastRecid = _children[BTree.DEFAULT_SIZE - 1];
    return loadNode(lastRecid);
}
}
================================================
FILE: src/main/java/org/apache/jdbm/BTreeSet.java
================================================
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jdbm;
import java.util.*;
/**
 * Wrapper class around <code>SortedMap</code> to implement <code>NavigableSet</code>
 *
 * This code originally comes from Apache Harmony, was adapted by Jan Kotek for JDBM
 */
class BTreeSet extends AbstractSet implements NavigableSet {
/**
* use keyset from this map
*/
final BTreeMap map;
BTreeSet(BTreeMap map) {
this.map = map;
}
public boolean add(E object) {
return map.put(object, Utils.EMPTY_STRING) == null;
}
public boolean addAll(Collection extends E> collection) {
return super.addAll(collection);
}
public void clear() {
map.clear();
}
public Comparator super E> comparator() {
return map.comparator();
}
public boolean contains(Object object) {
return map.containsKey(object);
}
public boolean isEmpty() {
return map.isEmpty();
}
public E lower(E e) {
return map.lowerKey(e);
}
public E floor(E e) {
return map.floorKey(e);
}
public E ceiling(E e) {
return map.ceilingKey(e);
}
public E higher(E e) {
return map.higherKey(e);
}
public E pollFirst() {
Map.Entry e = map.pollFirstEntry();
return e!=null? e.getKey():null;
}
public E pollLast() {
Map.Entry e = map.pollLastEntry();
return e!=null? e.getKey():null;
}
public Iterator iterator() {
final Iterator> iter = map.entrySet().iterator();
return new Iterator() {
public boolean hasNext() {
return iter.hasNext();
}
public E next() {
Map.Entry e = iter.next();
return e!=null?e.getKey():null;
}
public void remove() {
iter.remove();
}
};
}
public NavigableSet descendingSet() {
return map.descendingKeySet();
}
public Iterator descendingIterator() {
return map.descendingKeySet().iterator();
}
public NavigableSet subSet(E fromElement, boolean fromInclusive, E toElement, boolean toInclusive) {
return map.subMap(fromElement,fromInclusive,toElement,toInclusive).navigableKeySet();
}
public NavigableSet headSet(E toElement, boolean inclusive) {
return map.headMap(toElement,inclusive).navigableKeySet();
}
public NavigableSet tailSet(E fromElement, boolean inclusive) {
return map.tailMap(fromElement,inclusive).navigableKeySet();
}
public boolean remove(Object object) {
return map.remove(object) != null;
}
public int size() {
return map.size();
}
public E first() {
return map.firstKey();
}
public E last() {
return map.lastKey();
}
public SortedSet subSet(E start, E end) {
Comparator super E> c = map.comparator();
int compare = (c == null) ? ((Comparable) start).compareTo(end) : c
.compare(start, end);
if (compare <= 0) {
return new BTreeSet((BTreeMap) map.subMap(start, true,end,false));
}
throw new IllegalArgumentException();
}
public SortedSet headSet(E end) {
// Check for errors
Comparator super E> c = map.comparator();
if (c == null) {
((Comparable) end).compareTo(end);
} else {
c.compare(end, end);
}
return new BTreeSet((BTreeMap) map.headMap(end,false));
}
public SortedSet tailSet(E start) {
// Check for errors
Comparator super E> c = map.comparator();
if (c == null) {
((Comparable) start).compareTo(start);
} else {
c.compare(start, start);
}
return new BTreeSet((BTreeMap) map.tailMap(start,true));
}
}
================================================
FILE: src/main/java/org/apache/jdbm/DB.java
================================================
package org.apache.jdbm;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentNavigableMap;
/**
* Database is root class for creating and loading persistent collections. It also contains
* transaction operations.
* //TODO just write some readme
*
*
* @author Jan Kotek
* @author Alex Boisvert
* @author Cees de Groot
*/
public interface DB {
/**
* Closes the DB and release resources.
* DB can not be used after it was closed
*/
void close();
/** @return true if db was already closed */
boolean isClosed();
/**
* Clear cache and remove all entries it contains.
* This may be useful for some Garbage Collection when reference cache is used.
*/
void clearCache();
/**
* Defragments storage so it consumes less space.
* It basically copies all records into a different store and then renames it, replacing the original store.
*
* Defrag has two steps: In first collections are rearranged, so records in collection are close to each other,
* and read speed is improved. In second step all records are sequentially transferred, reclaiming all unused space.
* First step is optional and may slow down defragmentation significantly as it requires many random-access reads.
* Second step reads and writes data sequentially and is very fast, comparable to copying files to new location.
*
*
* This commits any uncommitted data. Defrag also requires free space, as the store is basically recreated at a new location.
*
* @param sortCollections if collection records should be rearranged during defragment, this takes some extra time
*/
void defrag(boolean sortCollections);
/**
* Commit (make persistent) all changes since beginning of transaction.
* JDBM supports only single transaction.
*/
void commit();
/**
* Rollback (cancel) all changes since beginning of transaction.
* JDBM supports only single transaction.
* This operations affects all maps created or loaded by this DB.
*/
void rollback();
/**
* This calculates some database statistics such as collection sizes and record distributions.
* Can be useful for performance optimisation and troubleshooting.
* This method can run for a very long time.
*
* @return statistics contained in string
*/
String calculateStatistics();
/**
* Copy database content into ZIP file
* @param zipFile
*/
void copyToZip(String zipFile);
/**
* Get a Map which was already created and saved in DB.
* This map uses disk based H*Tree and should have similar performance
* as HashMap.
*
* @param name of hash map
*
* @return map
*/
ConcurrentMap getHashMap(String name);
/**
* Creates Map which persists data into DB.
*
* @param name record name
* @return
*/
ConcurrentMap createHashMap(String name);
/**
* Creates Hash Map which persists data into DB.
* Map will use custom serializers for Keys and Values.
* Leave keySerializer null to use default serializer for keys
*
* @param Key type
* @param Value type
* @param name record name
* @param keySerializer serializer to be used for Keys, leave null to use default serializer
* @param valueSerializer serializer to be used for Values
* @return
*/
ConcurrentMap createHashMap(String name, Serializer keySerializer, Serializer